diff --git a/.gitignore b/.gitignore index 551b1b5361ce2..9b2d400d13c9c 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ *.suo *.vcxproj.user *.patch +*.diff .idea .svn .classpath @@ -51,15 +52,10 @@ patchprocess/ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log -# Ignore files generated by HDDS acceptance tests. -hadoop-ozone/acceptance-test/docker-compose.log -hadoop-ozone/acceptance-test/junit-results.xml - #robotframework outputs log.html output.xml report.html -hadoop-hdds/docs/public .mvn diff --git a/BUILDING.txt b/BUILDING.txt index d3c9a1a7f51ee..d54ce83183846 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -85,7 +85,7 @@ Optional packages: * Linux FUSE $ sudo apt-get install fuse libfuse-dev * ZStandard compression - $ sudo apt-get install zstd + $ sudo apt-get install libzstd1-dev * PMDK library for storage class memory(SCM) as HDFS cache backend Please refer to http://pmem.io/ and https://github.com/pmem/pmdk @@ -104,8 +104,6 @@ Maven main modules: - hadoop-hdfs-project (Hadoop HDFS) - hadoop-yarn-project (Hadoop YARN) - hadoop-mapreduce-project (Hadoop MapReduce) - - hadoop-ozone (Hadoop Ozone) - - hadoop-hdds (Hadoop Distributed Data Store) - hadoop-tools (Hadoop tools like Streaming, Distcp, etc.) - hadoop-dist (Hadoop distribution assembler) - hadoop-client-modules (Hadoop client modules) @@ -419,6 +417,47 @@ Building command example: Note that the command above manually specified the openssl library and include path. This is necessary at least for Homebrewed OpenSSL. + +---------------------------------------------------------------------------------- + +Building on CentOS 8 + +---------------------------------------------------------------------------------- + + +* Install development tools such as GCC, autotools, OpenJDK and Maven. + $ sudo dnf group install --with-optional 'Development Tools' + $ sudo dnf install java-1.8.0-openjdk-devel maven + +* Install Protocol Buffers v3.7.1. + $ git clone https://github.com/protocolbuffers/protobuf + $ cd protobuf + $ git checkout v3.7.1 + $ autoreconf -i + $ ./configure --prefix=/usr/local + $ make + $ sudo make install + $ cd .. + +* Install libraries provided by CentOS 8. + $ sudo dnf install libtirpc-devel zlib-devel lz4-devel bzip2-devel openssl-devel cyrus-sasl-devel libpmem-devel + +* Install optional dependencies (snappy-devel). + $ sudo dnf --enablerepo=PowerTools snappy-devel + +* Install optional dependencies (libzstd-devel). + $ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + $ sudo dnf --enablerepo=epel install libzstd-devel + +* Install optional dependencies (isa-l). + $ sudo dnf --enablerepo=PowerTools install nasm + $ git clone https://github.com/intel/isa-l + $ cd isa-l/ + $ ./autogen.sh + $ ./configure + $ make + $ sudo make install + ---------------------------------------------------------------------------------- Building on Windows diff --git a/Jenkinsfile b/Jenkinsfile index 11cbb9189cfba..02b9a0eabdff3 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -35,7 +35,7 @@ pipeline { DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile" YETUS='yetus' // Branch or tag name. 
Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION='rel/0.10.0' + YETUS_VERSION='rel/0.12.0' } parameters { @@ -61,7 +61,7 @@ pipeline { steps { withCredentials( [usernamePassword(credentialsId: 'apache-hadoop-at-github.com', - passwordVariable: 'GITHUB_PASSWORD', + passwordVariable: 'GITHUB_TOKEN', usernameVariable: 'GITHUB_USER'), usernamePassword(credentialsId: 'hadoopqa-at-asf-jira', passwordVariable: 'JIRA_PASSWORD', @@ -105,8 +105,7 @@ pipeline { YETUS_ARGS+=("--html-report-file=${WORKSPACE}/${PATCHDIR}/report.html") # enable writing back to Github - YETUS_ARGS+=(--github-password="${GITHUB_PASSWORD}") - YETUS_ARGS+=(--github-user=${GITHUB_USER}) + YETUS_ARGS+=(--github-token="${GITHUB_TOKEN}") # enable writing back to ASF JIRA YETUS_ARGS+=(--jira-password="${JIRA_PASSWORD}") @@ -135,7 +134,7 @@ pipeline { YETUS_ARGS+=("--plugins=all") # use Hadoop's bundled shelldocs - YETUS_ARGS+=("--shelldocs=/testptch/hadoop/dev-support/bin/shelldocs") + YETUS_ARGS+=("--shelldocs=${WORKSPACE}/${SOURCEDIR}/dev-support/bin/shelldocs") # don't let these tests cause -1s because we aren't really paying that # much attention to them @@ -147,11 +146,14 @@ pipeline { YETUS_ARGS+=("--dockerfile=${DOCKERFILE}") # effectively treat dev-suport as a custom maven module - YETUS_ARGS+=("--skip-dir=dev-support") + YETUS_ARGS+=("--skip-dirs=dev-support") # help keep the ASF boxes clean YETUS_ARGS+=("--sentinel") + # use emoji vote so it is easier to find the broken line + YETUS_ARGS+=("--github-use-emoji-vote") + "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" ''' } diff --git a/LICENSE-binary b/LICENSE-binary index 588b9dc865ebc..dc399df6da7bc 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -254,15 +254,15 @@ commons-lang:commons-lang:2.6 commons-logging:commons-logging:1.1.3 commons-net:commons-net:3.6 de.ruedigermoeller:fst:2.50 -io.grpc:grpc-context:1.15.1 -io.grpc:grpc-core:1.15.1 -io.grpc:grpc-netty:1.15.1 -io.grpc:grpc-protobuf:1.15.1 -io.grpc:grpc-protobuf-lite:1.15.1 -io.grpc:grpc-stub:1.15.1 +io.grpc:grpc-api:1.26.0 +io.grpc:grpc-context:1.26.0 +io.grpc:grpc-core:1.26.0 +io.grpc:grpc-netty:1.26.0 +io.grpc:grpc-protobuf:1.26.0 +io.grpc:grpc-protobuf-lite:1.26.0 +io.grpc:grpc-stub:1.26.0 io.netty:netty:3.10.6.Final -io.netty:netty-all:4.0.52.Final -io.netty:netty-all:4.1.27.Final +io.netty:netty-all:4.1.42.Final io.netty:netty-buffer:4.1.27.Final io.netty:netty-codec:4.1.27.Final io.netty:netty-codec-http:4.1.27.Final @@ -282,7 +282,6 @@ io.swagger:swagger-annotations:1.5.4 javax.inject:javax.inject:1 log4j:log4j:1.2.17 net.java.dev.jna:jna:5.2.0 -net.jpountz.lz4:lz4:1.2.0 net.minidev:accessors-smart:1.2 net.minidev:json-smart:2.3 org.apache.avro:avro:1.7.7 @@ -307,7 +306,7 @@ org.apache.htrace:htrace-core:3.1.0-incubating org.apache.htrace:htrace-core4:4.1.0-incubating org.apache.httpcomponents:httpclient:4.5.6 org.apache.httpcomponents:httpcore:4.4.10 -org.apache.kafka:kafka-clients:0.8.2.1 +org.apache.kafka:kafka-clients:2.4.0 org.apache.kerby:kerb-admin:1.0.1 org.apache.kerby:kerb-client:1.0.1 org.apache.kerby:kerb-common:1.0.1 @@ -345,7 +344,8 @@ org.eclipse.jetty:jetty-xml:9.3.27.v20190418 org.eclipse.jetty.websocket:javax-websocket-client-impl:9.3.27.v20190418 org.eclipse.jetty.websocket:javax-websocket-server-impl:9.3.27.v20190418 org.ehcache:ehcache:3.3.1 -org.objenesis:objenesis:1.0:compile +org.lz4:lz4-java:1.6.0 +org.objenesis:objenesis:2.6 org.xerial.snappy:snappy-java:1.0.5 org.yaml:snakeyaml:1.16: org.wildfly.openssl:wildfly-openssl:1.0.7.Final @@ -364,6 +364,7 @@ 
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/com hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/util/tree.h hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/compat/{fstatat|openat|unlinkat}.h +com.github.luben:zstd-jni:1.4.3-1 dnsjava:dnsjava:2.1.7 org.codehaus.woodstox:stax2-api:3.1.4 @@ -384,7 +385,7 @@ com.google.protobuf:protobuf-java:3.6.1 com.google.re2j:re2j:1.1 com.jcraft:jsch:0.1.54 com.thoughtworks.paranamer:paranamer:2.3 -javax.activation:javax.activation-api:1.2.0 +jakarta.activation:jakarta.activation-api:1.2.1 org.fusesource.leveldbjni:leveldbjni-all:1.8 org.jline:jline:3.9.0 org.hamcrest:hamcrest-core:1.3 diff --git a/LICENSE.txt b/LICENSE.txt index d0d57461e76df..c8e90f27f293f 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -256,3 +256,26 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/st hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/TERMINAL + +======= +For hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/cJSON.[ch]: + +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release index d14c0073a5f17..39a5d0d319837 100755 --- a/dev-support/bin/create-release +++ b/dev-support/bin/create-release @@ -204,6 +204,11 @@ function set_defaults DOCKERFILE="${BASEDIR}/dev-support/docker/Dockerfile" DOCKERRAN=false + CPU_ARCH=$(echo "$MACHTYPE" | cut -d- -f1) + if [ "$CPU_ARCH" = "aarch64" ]; then + DOCKERFILE="${BASEDIR}/dev-support/docker/Dockerfile_aarch64" + fi + # Extract Java version from ${BASEDIR}/pom.xml # doing this outside of maven means we can do this before # the docker container comes up... 
@@ -249,7 +254,9 @@ function startgpgagent eval $("${GPGAGENT}" --daemon \ --options "${LOGDIR}/gpgagent.conf" \ --log-file="${LOGDIR}/create-release-gpgagent.log") - GPGAGENTPID=$(echo "${GPG_AGENT_INFO}" | cut -f 2 -d:) + GPGAGENTPID=$(pgrep "${GPGAGENT}") + GPG_AGENT_INFO="$HOME/.gnupg/S.gpg-agent:$GPGAGENTPID:1" + export GPG_AGENT_INFO fi if [[ -n "${GPG_AGENT_INFO}" ]]; then @@ -499,7 +506,12 @@ function dockermode # we always force build with the OpenJDK JDK # but with the correct version - echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-openjdk-amd64" + if [ "$CPU_ARCH" = "aarch64" ]; then + echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-openjdk-arm64" + else + echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-openjdk-amd64" + fi + echo "USER ${user_name}" printf "\n\n" ) | docker build -t "${imgname}" - @@ -639,10 +651,12 @@ function signartifacts big_console_header "Signing the release" - for i in ${ARTIFACTS_DIR}/*; do + run cd "${ARTIFACTS_DIR}" + for i in *; do ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}" sha512sum --tag "${i}" > "${i}.sha512" done + run cd "${BASEDIR}" if [[ "${ASFRELEASE}" = true ]]; then echo "Fetching the Apache Hadoop KEYS file..." diff --git a/dev-support/bin/dist-copynativelibs b/dev-support/bin/dist-copynativelibs index 4a783f086a4dc..ffc82b8fb1b05 100755 --- a/dev-support/bin/dist-copynativelibs +++ b/dev-support/bin/dist-copynativelibs @@ -72,7 +72,7 @@ function bundle_native_bin fi cd "${libdir}" || exit 1 - ${TAR} ./*"${libpattern}"* | (cd "${TARGET_BIN_DIR}"/ || exit 1 ; ${UNTAR}) + ${TAR} ./*"${binpattern}"* | (cd "${TARGET_BIN_DIR}"/ || exit 1 ; ${UNTAR}) if [[ $? -ne 0 ]]; then echo "Bundling bin files for ${binoption} failed" exit 1 @@ -152,13 +152,13 @@ if [[ -d "${LIB_DIR}" ]]; then exit 1 fi - bundle_native_lib "${SNAPPYLIBBUNDLE}" "snappy.lib" "snappy" "${SNAPPYLIB}" + bundle_native_lib "${SNAPPYLIBBUNDLE}" "snappy.lib" "libsnappy." "${SNAPPYLIB}" - bundle_native_lib "${ZSTDLIBBUNDLE}" "zstd.lib" "zstd" "${ZSTDLIB}" + bundle_native_lib "${ZSTDLIBBUNDLE}" "zstd.lib" "libzstd." "${ZSTDLIB}" - bundle_native_lib "${OPENSSLLIBBUNDLE}" "openssl.lib" "crypto" "${OPENSSLLIB}" + bundle_native_lib "${OPENSSLLIBBUNDLE}" "openssl.lib" "libcrypto." "${OPENSSLLIB}" - bundle_native_lib "${ISALBUNDLE}" "isal.lib" "isa" "${ISALLIB}" + bundle_native_lib "${ISALBUNDLE}" "isal.lib" "libisal." "${ISALLIB}" bundle_native_lib "${PMDKBUNDLE}" "pmdk.lib" "pmdk" "${PMDKLIB}" fi diff --git a/dev-support/bin/dist-layout-stitching b/dev-support/bin/dist-layout-stitching index 20e8cf27805e2..d4bfd8aaada3b 100755 --- a/dev-support/bin/dist-layout-stitching +++ b/dev-support/bin/dist-layout-stitching @@ -21,9 +21,6 @@ VERSION=$1 # project.build.directory BASEDIR=$2 -#hdds.version -HDDS_VERSION=$3 - function run() { declare res diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper index b0f71f105d85e..bca2316ae6784 100755 --- a/dev-support/bin/yetus-wrapper +++ b/dev-support/bin/yetus-wrapper @@ -144,7 +144,7 @@ else exit 1 fi -if [[ -n "${GPGBIN}" ]]; then +if [[ -n "${GPGBIN}" && ! "${HADOOP_SKIP_YETUS_VERIFICATION}" = true ]]; then if ! 
mkdir -p .gpg; then yetus_error "ERROR: yetus-dl: Unable to create ${HADOOP_PATCHPROCESS}/.gpg" exit 1 diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 65cada2784df9..5bd867f2f56c1 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -18,7 +18,7 @@ # Dockerfile for installing the necessary dependencies for building Hadoop. # See BUILDING.txt. -FROM ubuntu:xenial +FROM ubuntu:bionic WORKDIR /root @@ -44,9 +44,11 @@ ENV DEBCONF_TERSE true RUN apt-get -q update \ && apt-get -q install -y --no-install-recommends \ apt-utils \ + bats \ build-essential \ bzip2 \ clang \ + cmake \ curl \ doxygen \ fuse \ @@ -62,6 +64,7 @@ RUN apt-get -q update \ libsasl2-dev \ libsnappy-dev \ libssl-dev \ + libsnappy-dev \ libtool \ libzstd1-dev \ locales \ @@ -75,8 +78,8 @@ RUN apt-get -q update \ python-setuptools \ python-wheel \ rsync \ + shellcheck \ software-properties-common \ - snappy \ sudo \ valgrind \ zlib1g-dev \ @@ -93,20 +96,8 @@ RUN apt-get -q update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* - ###### -# Install cmake 3.1.0 (3.5.1 ships with Xenial) -###### -RUN mkdir -p /opt/cmake \ - && curl -L -s -S \ - https://cmake.org/files/v3.1/cmake-3.1.0-Linux-x86_64.tar.gz \ - -o /opt/cmake.tar.gz \ - && tar xzf /opt/cmake.tar.gz --strip-components 1 -C /opt/cmake -ENV CMAKE_HOME /opt/cmake -ENV PATH "${PATH}:/opt/cmake/bin" - -###### -# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial) +# Install Google Protobuf 3.7.1 (3.0.0 ships with Bionic) ###### # hadolint ignore=DL3003 RUN mkdir -p /opt/protobuf-src \ @@ -123,7 +114,7 @@ ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" ###### -# Install Apache Maven 3.3.9 (3.3.9 ships with Xenial) +# Install Apache Maven 3.6.0 (3.6.0 ships with Bionic) ###### # hadolint ignore=DL3008 RUN apt-get -q update \ @@ -131,9 +122,11 @@ RUN apt-get -q update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* ENV MAVEN_HOME /usr +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 ###### -# Install findbugs 3.0.1 (3.0.1 ships with Xenial) +# Install findbugs 3.1.0 (3.1.0 ships with Bionic) # Ant is needed for findbugs ###### # hadolint ignore=DL3008 @@ -143,30 +136,13 @@ RUN apt-get -q update \ && rm -rf /var/lib/apt/lists/* ENV FINDBUGS_HOME /usr -#### -# Install shellcheck (0.4.6, the latest as of 2017-09-26) -#### -# hadolint ignore=DL3008 -RUN add-apt-repository -y ppa:jonathonf/ghc-8.0.2 \ - && apt-get -q update \ - && apt-get -q install -y --no-install-recommends shellcheck \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -#### -# Install bats (0.4.0, the latest as of 2017-09-26, ships with Xenial) -#### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends bats \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - #### # Install pylint at fixed version (2.0.0 removed python2 support) # https://github.com/PyCQA/pylint/issues/2294 #### -RUN pip2 install pylint==1.9.2 +RUN pip2 install \ + configparser==4.0.2 \ + pylint==1.9.2 #### # Install dateutil.parser @@ -174,16 +150,23 @@ RUN pip2 install pylint==1.9.2 RUN pip2 install python-dateutil==2.7.3 ### -# Install node.js for web UI framework (4.2.6 ships with Xenial) +# Install node.js 8.17.0 for web UI framework (4.2.6 ships with Xenial) ### -# hadolint ignore=DL3008, DL3016 -RUN apt-get -q update \ - && apt-get install -y --no-install-recommends nodejs npm \ +RUN curl -L -s -S https://deb.nodesource.com/setup_8.x | 
bash - \ + && apt-get install -y --no-install-recommends nodejs=8.17.0-1nodesource1 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ - && ln -s /usr/bin/nodejs /usr/bin/node \ - && npm install npm@latest -g \ - && npm install -g jshint + && npm install -g bower@1.8.8 + +### +## Install Yarn 1.12.1 for web UI framework +#### +RUN curl -s -S https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \ + && echo 'deb https://dl.yarnpkg.com/debian/ stable main' > /etc/apt/sources.list.d/yarn.list \ + && apt-get -q update \ + && apt-get install -y --no-install-recommends yarn=1.21.1-1 \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* ### # Install hadolint @@ -200,13 +183,16 @@ RUN curl -L -s -S \ ### ENV MAVEN_OPTS -Xms256m -Xmx1536m +# Skip gpg verification when downloading Yetus via yetus-wrapper +ENV HADOOP_SKIP_YETUS_VERIFICATION true + ### # Everything past this point is either not needed for testing or breaks Yetus. # So tell Yetus not to read the rest of the file: # YETUS CUT HERE ### -# Hugo static website generator (for new hadoop site and Ozone docs) +# Hugo static website generator for new hadoop site RUN curl -L -o hugo.deb https://github.com/gohugoio/hugo/releases/download/v0.58.3/hugo_0.58.3_Linux-64bit.deb \ && dpkg --install hugo.deb \ && rm hugo.deb diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 new file mode 100644 index 0000000000000..d0cfa5a2fa24f --- /dev/null +++ b/dev-support/docker/Dockerfile_aarch64 @@ -0,0 +1,240 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Dockerfile for installing the necessary dependencies for building Hadoop. +# See BUILDING.txt. + +FROM ubuntu:xenial + +WORKDIR /root + +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +##### +# Disable suggests/recommends +##### +RUN echo APT::Install-Recommends "0"\; > /etc/apt/apt.conf.d/10disableextras +RUN echo APT::Install-Suggests "0"\; >> /etc/apt/apt.conf.d/10disableextras + +ENV DEBIAN_FRONTEND noninteractive +ENV DEBCONF_TERSE true + +###### +# Install common dependencies from packages. Versions here are either +# sufficient or irrelevant. +# +# WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default +# Ubuntu Java. See Java section below! 
+###### +# hadolint ignore=DL3008 +RUN apt-get -q update \ + && apt-get -q install -y --no-install-recommends \ + apt-utils \ + build-essential \ + bzip2 \ + clang \ + curl \ + doxygen \ + fuse \ + g++ \ + gcc \ + git \ + gnupg-agent \ + libbz2-dev \ + libcurl4-openssl-dev \ + libfuse-dev \ + libprotobuf-dev \ + libprotoc-dev \ + libsasl2-dev \ + libsnappy-dev \ + libssl-dev \ + libtool \ + libzstd1-dev \ + locales \ + make \ + pinentry-curses \ + pkg-config \ + python \ + python2.7 \ + python-pip \ + python-pkg-resources \ + python-setuptools \ + python-wheel \ + rsync \ + software-properties-common \ + snappy \ + sudo \ + valgrind \ + zlib1g-dev \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + + +####### +# OpenJDK 8 +####### +# hadolint ignore=DL3008 +RUN apt-get -q update \ + && apt-get -q install -y --no-install-recommends openjdk-8-jdk libbcprov-java \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + + +###### +# Install cmake 3.1.0 (3.5.1 ships with Xenial) +# There is no cmake binary available for aarch64. Build from source. +###### +# hadolint ignore=DL3003 +RUN mkdir -p /opt/cmake/src \ + && curl -L -s -S \ + https://cmake.org/files/v3.1/cmake-3.1.0-1-src.tar.bz2 \ + -o /opt/cmake/cmake-src.tar.bz2 \ + && tar xvjf /opt/cmake/cmake-src.tar.bz2 -C /opt/cmake/src \ + && cd /opt/cmake/src \ + && tar xvjf cmake-3.1.0.tar.bz2 \ + && cd cmake-3.1.0 && patch -p0 -i ../cmake-3.1.0-1.patch && mkdir .build && cd .build \ + && ../bootstrap --parallel=2 \ + && make -j2 && ./bin/cpack \ + && tar xzf cmake-3.1.0-Linux-aarch64.tar.gz --strip-components 1 -C /opt/cmake \ + && cd /opt/cmake && rm -rf /opt/cmake/src +ENV CMAKE_HOME /opt/cmake +ENV PATH "${PATH}:/opt/cmake/bin" + +###### +# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial) +###### +# hadolint ignore=DL3003 +RUN mkdir -p /opt/protobuf-src \ + && curl -L -s -S \ + https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \ + -o /opt/protobuf.tar.gz \ + && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \ + && cd /opt/protobuf-src \ + && ./configure --prefix=/opt/protobuf \ + && make install \ + && cd /root \ + && rm -rf /opt/protobuf-src +ENV PROTOBUF_HOME /opt/protobuf +ENV PATH "${PATH}:/opt/protobuf/bin" + +###### +# Install Apache Maven 3.3.9 (3.3.9 ships with Xenial) +###### +# hadolint ignore=DL3008 +RUN apt-get -q update \ + && apt-get -q install -y --no-install-recommends maven \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* +ENV MAVEN_HOME /usr + +###### +# Install findbugs 3.0.1 (3.0.1 ships with Xenial) +# Ant is needed for findbugs +###### +# hadolint ignore=DL3008 +RUN apt-get -q update \ + && apt-get -q install -y --no-install-recommends findbugs ant \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* +ENV FINDBUGS_HOME /usr + +#### +# Install shellcheck (0.4.6, the latest as of 2017-09-26) +#### +# hadolint ignore=DL3008 +RUN add-apt-repository -y ppa:hvr/ghc \ + && apt-get -q update \ + && apt-get -q install -y --no-install-recommends shellcheck \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +#### +# Install bats (0.4.0, the latest as of 2017-09-26, ships with Xenial) +#### +# hadolint ignore=DL3008 +RUN apt-get -q update \ + && apt-get -q install -y --no-install-recommends bats \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +#### +# Install pylint at fixed version (2.0.0 removed python2 support) +# https://github.com/PyCQA/pylint/issues/2294 +#### +RUN pip2 install \ + configparser==4.0.2 \ + 
pylint==1.9.2 + +#### +# Install dateutil.parser +#### +RUN pip2 install python-dateutil==2.7.3 + +### +# Install node.js 8.17.0 for web UI framework (4.2.6 ships with Xenial) +### +RUN curl -L -s -S https://deb.nodesource.com/setup_8.x | bash - \ + && apt-get install -y --no-install-recommends nodejs=8.17.0-1nodesource1 \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && npm install -g bower@1.8.8 + +### +## Install Yarn 1.12.1 for web UI framework +#### +RUN curl -s -S https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \ + && echo 'deb https://dl.yarnpkg.com/debian/ stable main' > /etc/apt/sources.list.d/yarn.list \ + && apt-get -q update \ + && apt-get install -y --no-install-recommends yarn=1.21.1-1 \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +### +# Install phantomjs built for aarch64 +#### +RUN mkdir -p /opt/phantomjs \ + && curl -L -s -S \ + https://github.com/liusheng/phantomjs/releases/download/2.1.1/phantomjs-2.1.1-linux-aarch64.tar.bz2 \ + -o /opt/phantomjs/phantomjs-2.1.1-linux-aarch64.tar.bz2 \ + && tar xvjf /opt/phantomjs/phantomjs-2.1.1-linux-aarch64.tar.bz2 --strip-components 1 -C /opt/phantomjs \ + && cp /opt/phantomjs/bin/phantomjs /usr/bin/ \ + && rm -rf /opt/phantomjs + +### +# Avoid out of memory errors in builds +### +ENV MAVEN_OPTS -Xms256m -Xmx1536m + +# Skip gpg verification when downloading Yetus via yetus-wrapper +ENV HADOOP_SKIP_YETUS_VERIFICATION true + +### +# Everything past this point is either not needed for testing or breaks Yetus. +# So tell Yetus not to read the rest of the file: +# YETUS CUT HERE +### + +# Hugo static website generator (for new hadoop site docs) +RUN curl -L -o hugo.deb https://github.com/gohugoio/hugo/releases/download/v0.58.3/hugo_0.58.3_Linux-ARM64.deb \ + && dpkg --install hugo.deb \ + && rm hugo.deb + + +# Add a welcome message and environment checks. +COPY hadoop_env_checks.sh /root/hadoop_env_checks.sh +RUN chmod 755 /root/hadoop_env_checks.sh +# hadolint ignore=SC2016 +RUN echo '${HOME}/hadoop_env_checks.sh' >> /root/.bashrc diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml index b0fd7325c6eb1..7b709fe29086d 100644 --- a/hadoop-assemblies/pom.xml +++ b/hadoop-assemblies/pom.xml @@ -23,11 +23,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../hadoop-project hadoop-assemblies - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT Apache Hadoop Assemblies Apache Hadoop Assemblies diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-submarine.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-submarine.xml deleted file mode 100644 index b1e039fd501bd..0000000000000 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-submarine.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - hadoop-src - - tar.gz - - true - - - . - - LICENCE.txt - README.txt - NOTICE.txt - - - - . 
- true - - .git/** - **/.gitignore - **/.svn - **/*.iws - **/*.ipr - **/*.iml - **/.classpath - **/.project - **/.settings - **/target/** - - **/*.log - **/build/** - **/file:/** - **/SecurityAuth.audit* - - - - diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml index b47b4bcc333fb..871694209393f 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml @@ -56,9 +56,7 @@ **/build/** **/file:/** **/SecurityAuth.audit* - hadoop-ozone/** - hadoop-hdds/** - hadoop-submarine/** + patchprocess/** diff --git a/hadoop-build-tools/pom.xml b/hadoop-build-tools/pom.xml index ed4c0ef9ce9ff..584d1fee281ba 100644 --- a/hadoop-build-tools/pom.xml +++ b/hadoop-build-tools/pom.xml @@ -18,7 +18,7 @@ hadoop-main org.apache.hadoop - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT 4.0.0 hadoop-build-tools diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml index 32f83e7a5886a..8f3d3f13824ef 100644 --- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml +++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml @@ -67,6 +67,10 @@ + + + + @@ -122,7 +126,6 @@ - diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml index 7ee7b85fec937..48bb56efe62d9 100644 --- a/hadoop-client-modules/hadoop-client-api/pom.xml +++ b/hadoop-client-modules/hadoop-client-api/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-client-api - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT jar Apache Hadoop Client diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml index 757b374ec0ab0..144f2a66ff7d7 100644 --- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-client-check-invariants - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT pom diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml index 08b4fb27befd9..1a5d27ce213aa 100644 --- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-client-check-test-invariants - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT pom diff --git a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml index 1a14549250c3e..e58971b25f00c 100644 --- a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml +++ b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-client-integration-tests - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT Checks that we can use the generated artifacts Apache Hadoop Client Packaging Integration Tests diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index 52595d93523e9..b447eedf1349f 100644 --- 
a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-client-minicluster - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT jar Apache Hadoop Minicluster for Clients diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml index 552cd9c1d88c7..fe95ed8688548 100644 --- a/hadoop-client-modules/hadoop-client-runtime/pom.xml +++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-client-runtime - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT jar Apache Hadoop Client @@ -339,6 +339,13 @@ **/pom.xml + + javax/xml/bind/ + ${shaded.dependency.prefix}.javax.xml.bind. + + **/pom.xml + + net/ ${shaded.dependency.prefix}.net. diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml index 9216a2e54a397..dced359b286d9 100644 --- a/hadoop-client-modules/hadoop-client/pom.xml +++ b/hadoop-client-modules/hadoop-client/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project-dist - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project-dist hadoop-client - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT Apache Hadoop Client aggregation pom with dependencies exposed Apache Hadoop Client Aggregator diff --git a/hadoop-client-modules/pom.xml b/hadoop-client-modules/pom.xml index 0895e31ca307f..fb4aedb0aeb43 100644 --- a/hadoop-client-modules/pom.xml +++ b/hadoop-client-modules/pom.xml @@ -18,7 +18,7 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../hadoop-project hadoop-client-modules diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml index b5e35b079f9fd..11b092674cf4f 100644 --- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-cloud-storage - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT jar Apache Hadoop Cloud Storage @@ -128,5 +128,10 @@ hadoop-openstack compile + + org.apache.hadoop + hadoop-cos + compile + diff --git a/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml b/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml index 40d78d0cd6cec..e647e678a07a6 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml +++ b/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml @@ -15,4 +15,9 @@ limitations under the License. 
--> + + + + h_LIB + diff --git a/hadoop-cloud-storage-project/hadoop-cos/pom.xml b/hadoop-cloud-storage-project/hadoop-cos/pom.xml index 839bd04c9b643..d18b09f450408 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cos/pom.xml @@ -20,7 +20,7 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-cos @@ -81,6 +81,22 @@ 3600 + + org.apache.maven.plugins + maven-dependency-plugin + + + deplist + compile + + list + + + ${project.basedir}/target/hadoop-cloud-storage-deps/${project.artifactId}.cloud-storage-optional.txt + + + + @@ -93,8 +109,8 @@ com.qcloud - cos_api - 5.4.9 + cos_api-bundle + 5.6.19 compile diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java index a4ee4d5be9ac8..409c9cb42f966 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java @@ -63,32 +63,27 @@ private BufferPool() { private File createDir(String dirPath) throws IOException { File dir = new File(dirPath); - if (null != dir) { - if (!dir.exists()) { - LOG.debug("Buffer dir: [{}] does not exists. create it first.", - dirPath); - if (dir.mkdirs()) { - if (!dir.setWritable(true) || !dir.setReadable(true) - || !dir.setExecutable(true)) { - LOG.warn("Set the buffer dir: [{}]'s permission [writable," - + "readable, executable] failed.", dir.getAbsolutePath()); - } - LOG.debug("Buffer dir: [{}] is created successfully.", - dir.getAbsolutePath()); - } else { - // Once again, check if it has been created successfully. - // Prevent problems created by multiple processes at the same time. - if (!dir.exists()) { - throw new IOException("buffer dir:" + dir.getAbsolutePath() - + " is created unsuccessfully"); - } + if (!dir.exists()) { + LOG.debug("Buffer dir: [{}] does not exists. create it first.", + dirPath); + if (dir.mkdirs()) { + if (!dir.setWritable(true) || !dir.setReadable(true) + || !dir.setExecutable(true)) { + LOG.warn("Set the buffer dir: [{}]'s permission [writable," + + "readable, executable] failed.", dir.getAbsolutePath()); } + LOG.debug("Buffer dir: [{}] is created successfully.", + dir.getAbsolutePath()); } else { - LOG.debug("buffer dir: {} already exists.", dirPath); + // Once again, check if it has been created successfully. + // Prevent problems created by multiple processes at the same time. 
+ if (!dir.exists()) { + throw new IOException("buffer dir:" + dir.getAbsolutePath() + + " is created unsuccessfully"); + } } } else { - throw new IOException("creating buffer dir: " + dir.getAbsolutePath() - + "unsuccessfully."); + LOG.debug("buffer dir: {} already exists.", dirPath); } return dir; diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java index a5dcdda07120b..249e9e1ade82a 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java @@ -80,7 +80,6 @@ public CosNFileReadTask( public void run() { int retries = 0; RetryPolicy.RetryAction retryAction; - LOG.info(Thread.currentThread().getName() + "read ..."); try { this.readBuffer.lock(); do { diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java index 39981caba24bb..cdac15ffc619e 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java @@ -22,15 +22,16 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.Modifier; +import java.net.URI; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.qcloud.cos.auth.COSCredentialsProvider; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.cosn.auth.COSCredentialProviderList; -import org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialProvider; -import org.apache.hadoop.fs.cosn.auth.SimpleCredentialProvider; +import org.apache.hadoop.fs.cosn.auth.COSCredentialsProviderList; +import org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialsProvider; +import org.apache.hadoop.fs.cosn.auth.SimpleCredentialsProvider; /** * Utility methods for CosN code. 
@@ -48,21 +49,23 @@ public final class CosNUtils { private CosNUtils() { } - public static COSCredentialProviderList createCosCredentialsProviderSet( + public static COSCredentialsProviderList createCosCredentialsProviderSet( + URI uri, Configuration conf) throws IOException { - COSCredentialProviderList credentialProviderList = - new COSCredentialProviderList(); + COSCredentialsProviderList credentialProviderList = + new COSCredentialsProviderList(); Class[] cosClasses = CosNUtils.loadCosProviderClasses( conf, CosNConfigKeys.COSN_CREDENTIALS_PROVIDER); if (0 == cosClasses.length) { - credentialProviderList.add(new SimpleCredentialProvider(conf)); - credentialProviderList.add(new EnvironmentVariableCredentialProvider()); + credentialProviderList.add( + new SimpleCredentialsProvider(uri, conf)); + credentialProviderList.add( + new EnvironmentVariableCredentialsProvider(uri, conf)); } else { for (Class credClass : cosClasses) { - credentialProviderList.add(createCOSCredentialProvider( - conf, + credentialProviderList.add(createCOSCredentialProvider(uri, conf, credClass)); } } @@ -83,16 +86,17 @@ public static Class[] loadCosProviderClasses( } public static COSCredentialsProvider createCOSCredentialProvider( + URI uri, Configuration conf, Class credClass) throws IOException { COSCredentialsProvider credentialsProvider; if (!COSCredentialsProvider.class.isAssignableFrom(credClass)) { - throw new IllegalArgumentException( - "class " + credClass + " " + NOT_COS_CREDENTIAL_PROVIDER); + throw new IllegalArgumentException("class " + credClass + " " + + NOT_COS_CREDENTIAL_PROVIDER); } if (Modifier.isAbstract(credClass.getModifiers())) { - throw new IllegalArgumentException( - "class " + credClass + " " + ABSTRACT_CREDENTIAL_PROVIDER); + throw new IllegalArgumentException("class " + credClass + " " + + ABSTRACT_CREDENTIAL_PROVIDER); } LOG.debug("Credential Provider class: " + credClass.getName()); @@ -112,8 +116,18 @@ public static COSCredentialsProvider createCOSCredentialProvider( return credentialsProvider; } - Method factory = getFactoryMethod( - credClass, COSCredentialsProvider.class, "getInstance"); + // new credClass(uri, conf) + constructor = getConstructor(credClass, URI.class, + Configuration.class); + if (null != constructor) { + credentialsProvider = + (COSCredentialsProvider) constructor.newInstance(uri, + conf); + return credentialsProvider; + } + + Method factory = getFactoryMethod(credClass, + COSCredentialsProvider.class, "getInstance"); if (null != factory) { credentialsProvider = (COSCredentialsProvider) factory.invoke(null); return credentialsProvider; diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java index 833f42d7be6e7..d2484c0e47b3c 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java @@ -34,6 +34,7 @@ import com.qcloud.cos.ClientConfig; import com.qcloud.cos.auth.BasicCOSCredentials; import com.qcloud.cos.auth.COSCredentials; +import com.qcloud.cos.endpoint.SuffixEndpointBuilder; import com.qcloud.cos.exception.CosClientException; import com.qcloud.cos.exception.CosServiceException; import com.qcloud.cos.http.HttpProtocol; @@ -64,7 +65,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.cosn.auth.COSCredentialProviderList; +import org.apache.hadoop.fs.cosn.auth.COSCredentialsProviderList; import org.apache.hadoop.util.VersionInfo; import org.apache.http.HttpStatus; @@ -89,9 +90,9 @@ class CosNativeFileSystemStore implements NativeFileSystemStore { * @throws IOException Initialize the COS client failed, * caused by incorrect options. */ - private void initCOSClient(Configuration conf) throws IOException { - COSCredentialProviderList credentialProviderList = - CosNUtils.createCosCredentialsProviderSet(conf); + private void initCOSClient(URI uri, Configuration conf) throws IOException { + COSCredentialsProviderList credentialProviderList = + CosNUtils.createCosCredentialsProviderSet(uri, conf); String region = conf.get(CosNConfigKeys.COSN_REGION_KEY); String endpointSuffix = conf.get( CosNConfigKeys.COSN_ENDPOINT_SUFFIX_KEY); @@ -113,7 +114,7 @@ private void initCOSClient(Configuration conf) throws IOException { ClientConfig config; if (null == region) { config = new ClientConfig(new Region("")); - config.setEndPointSuffix(endpointSuffix); + config.setEndpointBuilder(new SuffixEndpointBuilder(endpointSuffix)); } else { config = new ClientConfig(new Region(region)); } @@ -146,7 +147,7 @@ private void initCOSClient(Configuration conf) throws IOException { @Override public void initialize(URI uri, Configuration conf) throws IOException { try { - initCOSClient(conf); + initCOSClient(uri, conf); this.bucketName = uri.getHost(); } catch (Exception e) { handleException(e, ""); @@ -174,8 +175,8 @@ private void storeFileWithRetry(String key, InputStream inputStream, PutObjectResult putObjectResult = (PutObjectResult) callCOSClientWithRetry(putObjectRequest); - LOG.debug("Store file successfully. COS key: [{}], ETag: [{}], " - + "MD5: [{}].", key, putObjectResult.getETag(), new String(md5Hash)); + LOG.debug("Store file successfully. COS key: [{}], ETag: [{}].", + key, putObjectResult.getETag()); } catch (Exception e) { String errMsg = String.format("Store file failed. COS key: [%s], " + "exception: [%s]", key, e.toString()); @@ -196,8 +197,7 @@ private void storeFileWithRetry(String key, InputStream inputStream, public void storeFile(String key, File file, byte[] md5Hash) throws IOException { LOG.info("Store file from local path: [{}]. file length: [{}] COS key: " + - "[{}] MD5: [{}].", file.getCanonicalPath(), file.length(), key, - new String(md5Hash)); + "[{}]", file.getCanonicalPath(), file.length(), key); storeFileWithRetry(key, new BufferedInputStream(new FileInputStream(file)), md5Hash, file.length()); } @@ -218,7 +218,7 @@ public void storeFile( byte[] md5Hash, long contentLength) throws IOException { LOG.info("Store file from input stream. 
COS key: [{}], " - + "length: [{}], MD5: [{}].", key, contentLength, md5Hash); + + "length: [{}].", key, contentLength); storeFileWithRetry(key, inputStream, md5Hash, contentLength); } @@ -250,7 +250,11 @@ public void storeEmptyFile(String key) throws IOException { public PartETag uploadPart(File file, String key, String uploadId, int partNum) throws IOException { InputStream inputStream = new FileInputStream(file); - return uploadPart(inputStream, key, uploadId, partNum, file.length()); + try { + return uploadPart(inputStream, key, uploadId, partNum, file.length()); + } finally { + inputStream.close(); + } } @Override diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/AbstractCOSCredentialsProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/AbstractCOSCredentialsProvider.java new file mode 100644 index 0000000000000..1363a7934cba0 --- /dev/null +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/AbstractCOSCredentialsProvider.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.cosn.auth; + +import com.qcloud.cos.auth.COSCredentialsProvider; +import org.apache.hadoop.conf.Configuration; + +import javax.annotation.Nullable; +import java.net.URI; + +/** + * The base class for COS credential providers which take a URI or + * configuration in their constructor. + */ +public abstract class AbstractCOSCredentialsProvider + implements COSCredentialsProvider { + private final URI uri; + private final Configuration conf; + + public AbstractCOSCredentialsProvider(@Nullable URI uri, + Configuration conf) { + this.uri = uri; + this.conf = conf; + } + + public URI getUri() { + return uri; + } + + public Configuration getConf() { + return conf; + } +} \ No newline at end of file diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java deleted file mode 100644 index e900b997e4858..0000000000000 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java +++ /dev/null @@ -1,139 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.cosn.auth; - -import java.io.Closeable; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import com.google.common.base.Preconditions; -import com.qcloud.cos.auth.AnonymousCOSCredentials; -import com.qcloud.cos.auth.COSCredentials; -import com.qcloud.cos.auth.COSCredentialsProvider; -import com.qcloud.cos.exception.CosClientException; -import com.qcloud.cos.utils.StringUtils; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * a list of cos credentials provider. - */ -public class COSCredentialProviderList implements - COSCredentialsProvider, AutoCloseable { - private static final Logger LOG = - LoggerFactory.getLogger(COSCredentialProviderList.class); - - private static final String NO_COS_CREDENTIAL_PROVIDERS = - "No COS Credential Providers"; - private static final String CREDENTIALS_REQUESTED_WHEN_CLOSED = - "Credentials requested after provider list was closed"; - - private final List providers = - new ArrayList<>(1); - private boolean reuseLastProvider = true; - private COSCredentialsProvider lastProvider; - - private final AtomicInteger refCount = new AtomicInteger(1); - private final AtomicBoolean isClosed = new AtomicBoolean(false); - - public COSCredentialProviderList() { - } - - public COSCredentialProviderList( - Collection providers) { - this.providers.addAll(providers); - } - - public void add(COSCredentialsProvider provider) { - this.providers.add(provider); - } - - public int getRefCount() { - return this.refCount.get(); - } - - public void checkNotEmpty() { - if (this.providers.isEmpty()) { - throw new NoAuthWithCOSException(NO_COS_CREDENTIAL_PROVIDERS); - } - } - - public COSCredentialProviderList share() { - Preconditions.checkState(!this.closed(), "Provider list is closed"); - this.refCount.incrementAndGet(); - return this; - } - - public boolean closed() { - return this.isClosed.get(); - } - - @Override - public COSCredentials getCredentials() { - if (this.closed()) { - throw new NoAuthWithCOSException(CREDENTIALS_REQUESTED_WHEN_CLOSED); - } - - this.checkNotEmpty(); - - if (this.reuseLastProvider && this.lastProvider != null) { - return this.lastProvider.getCredentials(); - } - - for (COSCredentialsProvider provider : this.providers) { - try { - COSCredentials credentials = provider.getCredentials(); - if (!StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId()) - && !StringUtils.isNullOrEmpty(credentials.getCOSSecretKey()) - || credentials instanceof AnonymousCOSCredentials) { - this.lastProvider = provider; - return credentials; - } - } catch (CosClientException e) { - LOG.warn("No credentials provided by {}: {}", provider, e.toString()); - } - } - - throw new NoAuthWithCOSException( - "No COS Credentials provided by " + this.providers.toString()); - } - - @Override - public void close() throws Exception { - if (this.closed()) { - return; - } - - int remainder = this.refCount.decrementAndGet(); - if (remainder != 0) { - return; - } - this.isClosed.set(true); - - for (COSCredentialsProvider provider : this.providers) { - if (provider instanceof Closeable) 
{ - ((Closeable) provider).close(); - } - } - } - -} diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java new file mode 100644 index 0000000000000..e4c59a5a27611 --- /dev/null +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.cosn.auth; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import com.google.common.base.Preconditions; +import com.qcloud.cos.auth.AnonymousCOSCredentials; +import com.qcloud.cos.auth.COSCredentials; +import com.qcloud.cos.auth.COSCredentialsProvider; +import com.qcloud.cos.utils.StringUtils; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * a list of cos credentials provider. + */ +public class COSCredentialsProviderList implements + COSCredentialsProvider, AutoCloseable { + private static final Logger LOG = + LoggerFactory.getLogger(COSCredentialsProviderList.class); + + private static final String NO_COS_CREDENTIAL_PROVIDERS = + "No COS Credential Providers"; + private static final String CREDENTIALS_REQUESTED_WHEN_CLOSED = + "Credentials requested after provider list was closed"; + + private final List providers = + new ArrayList(1); + private boolean reuseLastProvider = true; + private COSCredentialsProvider lastProvider; + + private final AtomicInteger refCount = new AtomicInteger(1); + private final AtomicBoolean isClosed = new AtomicBoolean(false); + + public COSCredentialsProviderList() { + } + + public COSCredentialsProviderList( + Collection providers) { + this.providers.addAll(providers); + } + + public void add(COSCredentialsProvider provider) { + this.providers.add(provider); + } + + public int getRefCount() { + return this.refCount.get(); + } + + public void checkNotEmpty() { + if (this.providers.isEmpty()) { + throw new NoAuthWithCOSException(NO_COS_CREDENTIAL_PROVIDERS); + } + } + + public COSCredentialsProviderList share() { + Preconditions.checkState(!this.closed(), "Provider list is closed"); + this.refCount.incrementAndGet(); + return this; + } + + public boolean closed() { + return this.isClosed.get(); + } + + @Override + public COSCredentials getCredentials() { + if (this.closed()) { + throw new NoAuthWithCOSException(CREDENTIALS_REQUESTED_WHEN_CLOSED); + } + + this.checkNotEmpty(); + + if (this.reuseLastProvider && this.lastProvider != null) { + return this.lastProvider.getCredentials(); + } + + for (COSCredentialsProvider provider : this.providers) { + COSCredentials credentials = provider.getCredentials(); + if (null != credentials + && !StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId()) + && !StringUtils.isNullOrEmpty(credentials.getCOSSecretKey()) + || credentials instanceof AnonymousCOSCredentials) { + this.lastProvider = provider; + return credentials; + } + } + + throw new NoAuthWithCOSException( + "No COS Credentials provided by " + this.providers.toString()); + } + + @Override + public void refresh() { + if (this.closed()) { + return; + } + + for (COSCredentialsProvider cosCredentialsProvider : this.providers) { + cosCredentialsProvider.refresh(); + } + } + + @Override + public void close() throws Exception { + if (this.closed()) { + return; + } + + int remainder = this.refCount.decrementAndGet(); + if (remainder != 0) { + return; + } + this.isClosed.set(true); + + for (COSCredentialsProvider provider : 
this.providers) { + if (provider instanceof Closeable) { + ((Closeable) provider).close(); + } + } + } +} diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java deleted file mode 100644 index 0a7786b882f8b..0000000000000 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.cosn.auth; - -import com.qcloud.cos.auth.BasicCOSCredentials; -import com.qcloud.cos.auth.COSCredentials; -import com.qcloud.cos.auth.COSCredentialsProvider; -import com.qcloud.cos.exception.CosClientException; -import com.qcloud.cos.utils.StringUtils; - -import org.apache.hadoop.fs.cosn.Constants; - -/** - * the provider obtaining the cos credentials from the environment variables. - */ -public class EnvironmentVariableCredentialProvider - implements COSCredentialsProvider { - @Override - public COSCredentials getCredentials() { - String secretId = System.getenv(Constants.COSN_SECRET_ID_ENV); - String secretKey = System.getenv(Constants.COSN_SECRET_KEY_ENV); - - secretId = StringUtils.trim(secretId); - secretKey = StringUtils.trim(secretKey); - - if (!StringUtils.isNullOrEmpty(secretId) - && !StringUtils.isNullOrEmpty(secretKey)) { - return new BasicCOSCredentials(secretId, secretKey); - } else { - throw new CosClientException( - "Unable to load COS credentials from environment variables" + - "(COS_SECRET_ID or COS_SECRET_KEY)"); - } - } - - @Override - public String toString() { - return "EnvironmentVariableCredentialProvider{}"; - } -} diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialsProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialsProvider.java new file mode 100644 index 0000000000000..baa76908b6147 --- /dev/null +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialsProvider.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.cosn.auth; + +import com.qcloud.cos.auth.BasicCOSCredentials; +import com.qcloud.cos.auth.COSCredentials; +import com.qcloud.cos.auth.COSCredentialsProvider; +import com.qcloud.cos.utils.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.cosn.Constants; + +import javax.annotation.Nullable; +import java.net.URI; + +/** + * The provider obtaining the cos credentials from the environment variables. + */ +public class EnvironmentVariableCredentialsProvider + extends AbstractCOSCredentialsProvider implements COSCredentialsProvider { + + public EnvironmentVariableCredentialsProvider(@Nullable URI uri, + Configuration conf) { + super(uri, conf); + } + + @Override + public COSCredentials getCredentials() { + String secretId = System.getenv(Constants.COSN_SECRET_ID_ENV); + String secretKey = System.getenv(Constants.COSN_SECRET_KEY_ENV); + + secretId = StringUtils.trim(secretId); + secretKey = StringUtils.trim(secretKey); + + if (!StringUtils.isNullOrEmpty(secretId) + && !StringUtils.isNullOrEmpty(secretKey)) { + return new BasicCOSCredentials(secretId, secretKey); + } + + return null; + } + + @Override + public void refresh() { + } + + @Override + public String toString() { + return String.format("EnvironmentVariableCredentialsProvider{%s, %s}", + Constants.COSN_SECRET_ID_ENV, + Constants.COSN_SECRET_KEY_ENV); + } +} diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java deleted file mode 100644 index f0635fc0d00cf..0000000000000 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.cosn.auth; - -import com.qcloud.cos.auth.BasicCOSCredentials; -import com.qcloud.cos.auth.COSCredentials; -import com.qcloud.cos.auth.COSCredentialsProvider; -import com.qcloud.cos.exception.CosClientException; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.cosn.CosNConfigKeys; - -/** - * Get the credentials from the hadoop configuration. - */ -public class SimpleCredentialProvider implements COSCredentialsProvider { - private String secretId; - private String secretKey; - - public SimpleCredentialProvider(Configuration conf) { - this.secretId = conf.get( - CosNConfigKeys.COSN_SECRET_ID_KEY - ); - this.secretKey = conf.get( - CosNConfigKeys.COSN_SECRET_KEY_KEY - ); - } - - @Override - public COSCredentials getCredentials() { - if (!StringUtils.isEmpty(this.secretId) - && !StringUtils.isEmpty(this.secretKey)) { - return new BasicCOSCredentials(this.secretId, this.secretKey); - } - throw new CosClientException("secret id or secret key is unset"); - } - -} diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialsProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialsProvider.java new file mode 100644 index 0000000000000..107574a87c3aa --- /dev/null +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialsProvider.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.cosn.auth; + +import com.qcloud.cos.auth.BasicCOSCredentials; +import com.qcloud.cos.auth.COSCredentials; +import com.qcloud.cos.auth.COSCredentialsProvider; +import com.qcloud.cos.utils.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.cosn.CosNConfigKeys; + +import javax.annotation.Nullable; +import java.net.URI; + +/** + * Get the credentials from the hadoop configuration. + */ +public class SimpleCredentialsProvider + extends AbstractCOSCredentialsProvider implements COSCredentialsProvider { + private String secretId; + private String secretKey; + + public SimpleCredentialsProvider(@Nullable URI uri, Configuration conf) { + super(uri, conf); + if (null != conf) { + this.secretId = conf.get( + CosNConfigKeys.COSN_SECRET_ID_KEY); + this.secretKey = conf.get( + CosNConfigKeys.COSN_SECRET_KEY_KEY); + } + } + + @Override + public COSCredentials getCredentials() { + if (!StringUtils.isNullOrEmpty(this.secretId) + && !StringUtils.isNullOrEmpty(this.secretKey)) { + return new BasicCOSCredentials(this.secretId, this.secretKey); + } + return null; + } + + @Override + public void refresh() { + } +} diff --git a/hadoop-cloud-storage-project/hadoop-cos/site/markdown/cloud-storage/index.md b/hadoop-cloud-storage-project/hadoop-cos/src/site/markdown/cloud-storage/index.md similarity index 93% rename from hadoop-cloud-storage-project/hadoop-cos/site/markdown/cloud-storage/index.md rename to hadoop-cloud-storage-project/hadoop-cos/src/site/markdown/cloud-storage/index.md index 7049b3f0f013f..9c96ac3659815 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/site/markdown/cloud-storage/index.md +++ b/hadoop-cloud-storage-project/hadoop-cos/src/site/markdown/cloud-storage/index.md @@ -130,20 +130,19 @@ Each user needs to properly configure the credentials ( User's secreteId and sec ```xml fs.cosn.credentials.provider - org.apache.hadoop.fs.auth.SimpleCredentialProvider + org.apache.hadoop.fs.auth.SimpleCredentialsProvider This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider: - 1.org.apache.hadoop.fs.auth.SimpleCredentialProvider: Obtain the secret id and secret key - from fs.cosn.userinfo.secretId and fs.cosn.userinfo.secretKey in core-site.xml - 2.org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider: Obtain the secret id and secret key from system environment variables named COS_SECRET_ID and COS_SECRET_KEY + 1.org.apache.hadoop.fs.auth.SimpleCredentialsProvider: Obtain the secret id and secret key from fs.cosn.userinfo.secretId and fs.cosn.userinfo.secretKey in core-site.xml + 2.org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider: Obtain the secret id and secret key from system environment variables named COS_SECRET_ID and COS_SECRET_KEY If unspecified, the default order of credential providers is: - 1. org.apache.hadoop.fs.auth.SimpleCredentialProvider - 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider + 1. org.apache.hadoop.fs.auth.SimpleCredentialsProvider + 2. 
org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider @@ -237,7 +236,7 @@ Hadoop-COS provides rich runtime properties to set, and most of these do not req | properties | description | default value | required | |:----------:|:-----------|:-------------:|:--------:| | fs.defaultFS | Configure the default file system used by Hadoop.| None | NO | -| fs.cosn.credentials.provider | This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider:
1. org.apache.hadoop.fs.cos.auth.SimpleCredentialProvider: Obtain the secret id and secret key from `fs.cosn.userinfo.secretId` and `fs.cosn.userinfo.secretKey` in core-site.xml;
2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider: Obtain the secret id and secret key from system environment variables named `COSN_SECRET_ID` and `COSN_SECRET_KEY`.

If unspecified, the default order of credential providers is:
1. org.apache.hadoop.fs.auth.SimpleCredentialProvider;
2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider. | None | NO | +| fs.cosn.credentials.provider | This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider:
1. org.apache.hadoop.fs.auth.SimpleCredentialsProvider: Obtain the secret id and secret key from `fs.cosn.userinfo.secretId` and `fs.cosn.userinfo.secretKey` in core-site.xml;
2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider: Obtain the secret id and secret key from system environment variables named `COSN_SECRET_ID` and `COSN_SECRET_KEY`.

If unspecified, the default order of credential providers is:
1. org.apache.hadoop.fs.auth.SimpleCredentialsProvider;
2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider. | None | NO | | fs.cosn.userinfo.secretId/secretKey | The API key information of your account | None | YES | | fs.cosn.bucket.region | The region where the bucket is located. | None | YES | | fs.cosn.impl | The implementation class of the CosN filesystem. | None | YES | diff --git a/hadoop-cloud-storage-project/hadoop-cos/site/resources/css/site.css b/hadoop-cloud-storage-project/hadoop-cos/src/site/resources/css/site.css similarity index 100% rename from hadoop-cloud-storage-project/hadoop-cos/site/resources/css/site.css rename to hadoop-cloud-storage-project/hadoop-cos/src/site/resources/css/site.css diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosCredentials.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosCredentials.java new file mode 100644 index 0000000000000..8b74f3639ddbd --- /dev/null +++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosCredentials.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.cosn; + +import com.qcloud.cos.auth.COSCredentials; +import com.qcloud.cos.auth.COSCredentialsProvider; +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +public class TestCosCredentials { + private static final Logger LOG = + LoggerFactory.getLogger(TestCosCredentials.class); + + private final URI fsUri; + + private final String testCosNSecretId = "secretId"; + private final String testCosNSecretKey = "secretKey"; + private final String testCosNEnvSecretId = "env_secretId"; + private final String testCosNEnvSecretKey = "env_secretKey"; + + public TestCosCredentials() throws URISyntaxException { + // A fake uri for tests. + this.fsUri = new URI("cosn://test-bucket-1250000000"); + } + + @Test + public void testSimpleCredentialsProvider() throws Throwable { + Configuration configuration = new Configuration(); + configuration.set(CosNConfigKeys.COSN_SECRET_ID_KEY, + testCosNSecretId); + configuration.set(CosNConfigKeys.COSN_SECRET_KEY_KEY, + testCosNSecretKey); + validateCredentials(this.fsUri, configuration); + } + + @Test + public void testEnvironmentCredentialsProvider() throws Throwable { + Configuration configuration = new Configuration(); + // Set EnvironmentVariableCredentialsProvider as the CosCredentials + // Provider. + configuration.set(CosNConfigKeys.COSN_CREDENTIALS_PROVIDER, + "org.apache.hadoop.fs.cosn.EnvironmentVariableCredentialsProvider"); + // Set the environment variables storing the secret id and secret key. + System.setProperty(Constants.COSN_SECRET_ID_ENV, testCosNEnvSecretId); + System.setProperty(Constants.COSN_SECRET_KEY_ENV, testCosNEnvSecretKey); + validateCredentials(this.fsUri, configuration); + } + + private void validateCredentials(URI uri, Configuration configuration) + throws IOException { + if (null != configuration) { + COSCredentialsProvider credentialsProvider = + CosNUtils.createCosCredentialsProviderSet(uri, configuration); + COSCredentials cosCredentials = credentialsProvider.getCredentials(); + assertNotNull("The cos credentials obtained is null.", cosCredentials); + if (configuration.get( + CosNConfigKeys.COSN_CREDENTIALS_PROVIDER).compareToIgnoreCase( + "org.apache.hadoop.fs.cosn.EnvironmentVariableCredentialsProvider") + == 0) { + if (null == cosCredentials.getCOSAccessKeyId() + || cosCredentials.getCOSAccessKeyId().isEmpty() + || null == cosCredentials.getCOSSecretKey() + || cosCredentials.getCOSSecretKey().isEmpty()) { + String failMessage = String.format( + "Test EnvironmentVariableCredentialsProvider failed. The " + + "expected is [secretId: %s, secretKey: %s], but got null or" + + " empty.", testCosNEnvSecretId, testCosNEnvSecretKey); + fail(failMessage); + } + + if (cosCredentials.getCOSAccessKeyId() + .compareTo(testCosNEnvSecretId) != 0 + || cosCredentials.getCOSSecretKey() + .compareTo(testCosNEnvSecretKey) != 0) { + String failMessage = String.format("Test " + + "EnvironmentVariableCredentialsProvider failed. 
" + + "The expected is [secretId: %s, secretKey: %s], but got is " + + "[secretId:%s, secretKey:%s].", testCosNEnvSecretId, + testCosNEnvSecretKey, cosCredentials.getCOSAccessKeyId(), + cosCredentials.getCOSSecretKey()); + } + // expected + } else { + if (null == cosCredentials.getCOSAccessKeyId() + || cosCredentials.getCOSAccessKeyId().isEmpty() + || null == cosCredentials.getCOSSecretKey() + || cosCredentials.getCOSSecretKey().isEmpty()) { + String failMessage = String.format( + "Test COSCredentials failed. The " + + "expected is [secretId: %s, secretKey: %s], but got null or" + + " empty.", testCosNSecretId, testCosNSecretKey); + fail(failMessage); + } + if (cosCredentials.getCOSAccessKeyId() + .compareTo(testCosNSecretId) != 0 + || cosCredentials.getCOSSecretKey() + .compareTo(testCosNSecretKey) != 0) { + String failMessage = String.format("Test " + + "EnvironmentVariableCredentialsProvider failed. " + + "The expected is [secretId: %s, secretKey: %s], but got is " + + "[secretId:%s, secretKey:%s].", testCosNSecretId, + testCosNSecretKey, cosCredentials.getCOSAccessKeyId(), + cosCredentials.getCOSSecretKey()); + fail(failMessage); + } + // expected + } + } + } +} diff --git a/hadoop-cloud-storage-project/pom.xml b/hadoop-cloud-storage-project/pom.xml index f39e8c3aaf9f8..da0d88a8117b8 100644 --- a/hadoop-cloud-storage-project/pom.xml +++ b/hadoop-cloud-storage-project/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../hadoop-project hadoop-cloud-storage-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT Apache Hadoop Cloud Storage Project Apache Hadoop Cloud Storage Project pom diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml index 738f0ada8f1e9..a262d55b0426c 100644 --- a/hadoop-common-project/hadoop-annotations/pom.xml +++ b/hadoop-common-project/hadoop-annotations/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-annotations - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT Apache Hadoop Annotations Apache Hadoop Annotations jar diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml index fb904912999b8..4deda432797e0 100644 --- a/hadoop-common-project/hadoop-auth-examples/pom.xml +++ b/hadoop-common-project/hadoop-auth-examples/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-auth-examples - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT war Apache Hadoop Auth Examples diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 20a3e7059b154..4ff3bc14927fe 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-auth - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT jar Apache Hadoop Auth diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java index 1093d8a2539eb..488400647cf06 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java 
@@ -99,7 +99,10 @@ public void put(URI uri, Map> responseHeaders) { cookies = HttpCookie.parse(header); } catch (IllegalArgumentException iae) { // don't care. just skip malformed cookie headers. - LOG.debug("Cannot parse cookie header: " + header, iae); + // When header is empty - "Cannot parse cookie header, header = , + // reason = Empty cookie header string" + LOG.debug("Cannot parse cookie header, header = {}, reason = {} ", + header, iae.getMessage()); continue; } for (HttpCookie cookie : cookies) { diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml index 802197e33cbcd..cf5c3874d1063 100644 --- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml +++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml @@ -460,4 +460,10 @@ + + + + + + diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 84d3ae5b5addc..dd058812fc774 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project-dist - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project-dist hadoop-common - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT Apache Hadoop Common Apache Hadoop Common jar @@ -39,6 +39,10 @@ + + org.apache.hadoop.thirdparty + hadoop-shaded-protobuf_3_7 + org.apache.hadoop hadoop-annotations @@ -90,8 +94,8 @@ compile - javax.activation - javax.activation-api + jakarta.activation + jakarta.activation-api runtime @@ -272,6 +276,11 @@ sshd-core test + + org.apache.ftpserver + ftpserver-core + test + org.apache.htrace @@ -346,12 +355,12 @@ org.wildfly.openssl wildfly-openssl - provided + test - org.assertj - assertj-core - test + org.wildfly.openssl + wildfly-openssl-java + provided @@ -394,6 +403,36 @@ + + com.google.code.maven-replacer-plugin + replacer + + + replace-generated-sources + + false + + + + replace-generated-test-sources + + false + + + + replace-sources + + false + + + + replace-test-sources + + false + + + + org.apache.hadoop hadoop-maven-plugins @@ -842,9 +881,13 @@ parallel-tests-createdir + process-test-resources parallel-tests-createdir + + ${test.build.data} + @@ -852,6 +895,7 @@ org.apache.maven.plugins maven-surefire-plugin + ${ignoreTestFailure} ${testsThreadCount} false ${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh index 484fe2302f9ba..4be554aef6c25 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh @@ -213,7 +213,7 @@ function hadoop_privilege_check [[ "${EUID}" = 0 ]] } -## @description Execute a command via su when running as root +## @description Execute a command via sudo when running as root ## @description if the given user is found or exit with ## @description failure if not. ## @description otherwise just run it. (This is intended to @@ -224,14 +224,14 @@ function hadoop_privilege_check ## @param user ## @param commandstring ## @return exitstatus -function hadoop_su +function hadoop_sudo { declare user=$1 shift if hadoop_privilege_check; then if hadoop_verify_user_resolves user; then - su -l "${user}" -- "$@" + sudo -u "${user}" -- "$@" else hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. 
Aborting." return 1 @@ -241,7 +241,7 @@ function hadoop_su fi } -## @description Execute a command via su when running as root +## @description Execute a command via sudo when running as root ## @description with extra support for commands that might ## @description legitimately start as root (e.g., datanode) ## @description (This is intended to @@ -259,7 +259,7 @@ function hadoop_uservar_su # # if $EUID != 0, then exec # if $EUID =0 then - # if hdfs_subcmd_user is defined, call hadoop_su to exec + # if hdfs_subcmd_user is defined, call hadoop_sudo to exec # if hdfs_subcmd_user is not defined, error # # For secure daemons, this means both the secure and insecure env vars need to be @@ -283,7 +283,7 @@ function hadoop_uservar_su svar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER) if [[ -n "${!uvar}" ]]; then - hadoop_su "${!uvar}" "$@" + hadoop_sudo "${!uvar}" "$@" elif [[ -n "${!svar}" ]]; then ## if we are here, then SECURE_USER with no USER defined ## we are already privileged, so just run the command and hope @@ -596,11 +596,6 @@ function hadoop_bootstrap YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"} MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"} MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"} - HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"} - HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"} - OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"} - OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"} - OZONEFS_DIR=${OZONEFS_DIR:-"share/hadoop/ozonefs"} HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}} HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"} @@ -1342,7 +1337,7 @@ function hadoop_add_to_classpath_tools # shellcheck disable=SC1090 . "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh" else - hadoop_error "ERROR: Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found." + hadoop_debug "Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found." fi if declare -f hadoop_classpath_tools_${module} >/dev/null 2>&1; then @@ -1921,6 +1916,22 @@ function hadoop_start_secure_daemon exit 1 fi + if [[ -z "${HADOOP_DAEMON_JSVC_EXTRA_OPTS}" ]]; then + # If HADOOP_DAEMON_JSVC_EXTRA_OPTS is not set + if ${jsvc} -help | grep -q "\-cwd"; then + # Check if jsvc -help has entry for option -cwd + hadoop_debug "Your jsvc supports -cwd option." \ + "Adding option '-cwd .'. See HADOOP-16276 for details." + HADOOP_DAEMON_JSVC_EXTRA_OPTS="-cwd ." + else + hadoop_debug "Your jsvc doesn't support -cwd option." \ + "No need to add option '-cwd .'. See HADOOP-16276 for details." + fi + else + hadoop_debug "HADOOP_DAEMON_JSVC_EXTRA_OPTS is set." \ + "Ignoring jsvc -cwd option detection and addition." + fi + # note that shellcheck will throw a # bogus for-our-use-case 2086 here. # it doesn't properly support multi-line situations @@ -2035,7 +2046,8 @@ function hadoop_start_secure_daemon_wrapper hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!" fi # capture the ulimit output - su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1 + #shellcheck disable=SC2024 + sudo -u "${HADOOP_SECURE_USER}" bash -c "ulimit -a" >> "${jsvcoutfile}" 2>&1 #shellcheck disable=SC2086 if ! ps -p $! 
>/dev/null 2>&1; then return 1 diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index e43cd95b047ee..f4625f5999b1c 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -390,15 +390,6 @@ export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} # # export HDFS_DFSROUTER_OPTS="" -### -# Ozone Manager specific parameters -### -# Specify the JVM options to be used when starting the Ozone Manager. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_OM_OPTS="" - ### # HDFS StorageContainerManager specific parameters ### diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index 7f9ea462679b3..52d2c1ff038e6 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -282,13 +282,6 @@ log4j.appender.NMAUDIT.MaxBackupIndex=${nm.audit.log.maxbackupindex} #log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log #log4j.appender.nodemanagerrequestlog.RetainDays=3 -#Http Server request logs for Ozone S3Gateway -log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog -log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender -log4j.appender.s3gatewayrequestlog.Filename=${hadoop.log.dir}/jetty-s3gateway-yyyy_mm_dd.log -log4j.appender.s3gatewayrequestlog.RetainDays=3 - - # WebHdfs request log on datanodes # Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to # direct the log to a separate file. 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 180bde26574ca..9751a9b66945c 100755 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -51,7 +51,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Map; @@ -1691,7 +1690,11 @@ public boolean getBoolean(String name, boolean defaultValue) { return true; else if (StringUtils.equalsIgnoreCase("false", valueString)) return false; - else return defaultValue; + else { + LOG.warn("Invalid value for boolean: " + valueString + + ", choose default value: " + defaultValue + " for " + name); + return defaultValue; + } } /** @@ -3350,6 +3353,7 @@ void parseNext() throws IOException, XMLStreamException { handleStartElement(); break; case XMLStreamConstants.CHARACTERS: + case XMLStreamConstants.CDATA: if (parseToken) { char[] text = reader.getTextCharacters(); token.append(text, reader.getTextStart(), reader.getTextLength()); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 0453ca14537c3..1df68b647c99a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -31,7 +31,6 @@ import java.util.List; import java.util.Map; import java.util.NoSuchElementException; -import java.util.Set; import java.util.StringTokenizer; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; @@ -45,6 +44,7 @@ import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.impl.AbstractFSBuilderImpl; +import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -1355,22 +1355,20 @@ public boolean equals(Object other) { * setting up the expectation that the {@code get()} call * is needed to evaluate the result. * @param path path to the file - * @param mandatoryKeys set of options declared as mandatory. - * @param options options set during the build sequence. - * @param bufferSize buffer size + * @param parameters open file parameters from the builder. * @return a future which will evaluate to the opened file. * @throws IOException failure to resolve the link. 
* @throws IllegalArgumentException unknown mandatory key */ public CompletableFuture openFileWithOptions(Path path, - Set mandatoryKeys, - Configuration options, - int bufferSize) throws IOException { - AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(mandatoryKeys, + final OpenFileParameters parameters) throws IOException { + AbstractFSBuilderImpl.rejectUnknownMandatoryKeys( + parameters.getMandatoryKeys(), Collections.emptySet(), "for " + path); return LambdaUtils.eval( - new CompletableFuture<>(), () -> open(path, bufferSize)); + new CompletableFuture<>(), () -> + open(path, parameters.getBufferSize())); } public boolean hasPathCapability(final Path path, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchListingOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchListingOperations.java new file mode 100644 index 0000000000000..f72b1e288eb49 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchListingOperations.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Interface filesystems MAY implement to offer a batched list. + * If implemented, filesystems SHOULD declare + * {@link CommonPathCapabilities#FS_EXPERIMENTAL_BATCH_LISTING} to be a supported + * path capability. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public interface BatchListingOperations { + + /** + * Batched listing API that returns {@link PartialListing}s for the + * passed Paths. + * + * @param paths List of paths to list. + * @return RemoteIterator that returns corresponding PartialListings. + * @throws IOException failure + */ + RemoteIterator> batchedListStatusIterator( + List paths) throws IOException; + + /** + * Batched listing API that returns {@link PartialListing}s for the passed + * Paths. The PartialListing will contain {@link LocatedFileStatus} entries + * with locations. + * + * @param paths List of paths to list. + * @return RemoteIterator that returns corresponding PartialListings. 
+ * @throws IOException failure + */ + RemoteIterator> + batchedListLocatedStatusIterator( + List paths) throws IOException; + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java index 92476d77ddb44..58dc82d2efb2d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java @@ -47,6 +47,7 @@ public abstract class CachingGetSpaceUsed implements Closeable, GetSpaceUsed { private final long jitter; private final String dirPath; private Thread refreshUsed; + private boolean shouldFirstRefresh; /** * This is the constructor used by the builder. @@ -79,16 +80,30 @@ public CachingGetSpaceUsed(CachingGetSpaceUsed.Builder builder) this.refreshInterval = interval; this.jitter = jitter; this.used.set(initialUsed); + this.shouldFirstRefresh = true; } void init() { if (used.get() < 0) { used.set(0); + if (!shouldFirstRefresh) { + // Skip initial refresh operation, so we need to do first refresh + // operation immediately in refresh thread. + initRefeshThread(true); + return; + } refresh(); } + initRefeshThread(false); + } + /** + * RunImmediately should set true, if we skip the first refresh. + * @param runImmediately The param default should be false. + */ + private void initRefeshThread (boolean runImmediately) { if (refreshInterval > 0) { - refreshUsed = new Thread(new RefreshThread(this), + refreshUsed = new Thread(new RefreshThread(this, runImmediately), "refreshUsed-" + dirPath); refreshUsed.setDaemon(true); refreshUsed.start(); @@ -100,6 +115,14 @@ void init() { protected abstract void refresh(); + /** + * Reset that if we need to do the first refresh. + * @param shouldFirstRefresh The flag value to set. + */ + protected void setShouldFirstRefresh(boolean shouldFirstRefresh) { + this.shouldFirstRefresh = shouldFirstRefresh; + } + /** * @return an estimate of space used in the directory path. */ @@ -156,9 +179,11 @@ public void close() throws IOException { private static final class RefreshThread implements Runnable { final CachingGetSpaceUsed spaceUsed; + private boolean runImmediately; - RefreshThread(CachingGetSpaceUsed spaceUsed) { + RefreshThread(CachingGetSpaceUsed spaceUsed, boolean runImmediately) { this.spaceUsed = spaceUsed; + this.runImmediately = runImmediately; } @Override @@ -176,7 +201,10 @@ public void run() { } // Make sure that after the jitter we didn't end up at 0. 
refreshInterval = Math.max(refreshInterval, 1); - Thread.sleep(refreshInterval); + if (!runImmediately) { + Thread.sleep(refreshInterval); + } + runImmediately = false; // update the used variable spaceUsed.refresh(); } catch (InterruptedException e) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java index 5e5d29a28bfce..cc9c284c9fa55 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java @@ -27,8 +27,6 @@ import java.util.Collections; import java.util.EnumSet; import java.util.List; -import java.util.Locale; -import java.util.Set; import java.util.concurrent.CompletableFuture; import com.google.common.base.Preconditions; @@ -37,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.impl.AbstractFSBuilderImpl; import org.apache.hadoop.fs.impl.FutureDataInputStreamBuilderImpl; +import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.DataChecksum; @@ -845,14 +844,14 @@ public FutureDataInputStreamBuilder openFile(final Path path) @Override protected CompletableFuture openFileWithOptions( final Path path, - final Set mandatoryKeys, - final Configuration options, - final int bufferSize) throws IOException { - AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(mandatoryKeys, + final OpenFileParameters parameters) throws IOException { + AbstractFSBuilderImpl.rejectUnknownMandatoryKeys( + parameters.getMandatoryKeys(), Collections.emptySet(), "for " + path); return LambdaUtils.eval( - new CompletableFuture<>(), () -> open(path, bufferSize)); + new CompletableFuture<>(), + () -> open(path, parameters.getBufferSize())); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java index bbbf073cc241c..908d67723e7be 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java @@ -27,7 +27,7 @@ * cluster filesystem is exceeded. See also * https://issues.apache.org/jira/browse/MAPREDUCE-7148. 
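As a usage note for the exception described above: the sketch below shows one way a downstream writer might surface it rather than retry. It is not part of the patch; the class name, overwrite flag and no-retry policy are illustrative assumptions.

```java
import java.io.IOException;

import org.apache.hadoop.fs.ClusterStorageCapacityExceededException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Hypothetical client-side handling of a capacity-exceeded failure. */
public class CapacityAwareWriter {
  public static void write(FileSystem fs, Path path, byte[] data)
      throws IOException {
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write(data);
    } catch (ClusterStorageCapacityExceededException e) {
      // Cluster or quota capacity is exhausted: retrying will not help,
      // so propagate the failure to the caller.
      throw e;
    }
  }
}
```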
*/ -@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" }) +@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce", "Tez" }) @InterfaceStability.Evolving public class ClusterStorageCapacityExceededException extends IOException { private static final long serialVersionUID = 1L; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java index 8c09db1284cff..c08af395ad2f9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java @@ -114,6 +114,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { "callqueue.overflow.trigger.failover"; public static final boolean IPC_CALLQUEUE_SERVER_FAILOVER_ENABLE_DEFAULT = false; + /** Callqueue subqueue capacity weights. */ + public static final String IPC_CALLQUEUE_CAPACITY_WEIGHTS_KEY = + "callqueue.capacity.weights"; /** * IPC scheduler priority levels. @@ -426,4 +429,13 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { "dfs.client.ignore.namenode.default.kms.uri"; public static final boolean DFS_CLIENT_IGNORE_NAMENODE_DEFAULT_KMS_URI_DEFAULT = false; + + /** + * Whether or not ThreadMXBean is used for getting thread info in JvmMetrics, + * ThreadGroup approach is preferred for better performance. + */ + public static final String HADOOP_METRICS_JVM_USE_THREAD_MXBEAN = + "hadoop.metrics.jvm.use-thread-mxbean"; + public static final boolean HADOOP_METRICS_JVM_USE_THREAD_MXBEAN_DEFAULT = + false; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index a68012b06d2bc..ce132f9a37891 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -988,5 +988,14 @@ public class CommonConfigurationKeysPublic { public static final String HADOOP_PROMETHEUS_ENABLED = "hadoop.prometheus.endpoint.enabled"; public static final boolean HADOOP_PROMETHEUS_ENABLED_DEFAULT = false; + + /** + * @see + * + * core-default.xml + */ + public static final String HADOOP_HTTP_IDLE_TIMEOUT_MS_KEY = + "hadoop.http.idle_timeout.ms"; + public static final int HADOOP_HTTP_IDLE_TIMEOUT_MS_DEFAULT = 1000; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java index 31e6bac0ccee5..fb46ef81e36fa 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java @@ -18,6 +18,8 @@ package org.apache.hadoop.fs; +import org.apache.hadoop.classification.InterfaceStability; + /** * Common path capabilities. */ @@ -123,4 +125,10 @@ private CommonPathCapabilities() { */ public static final String FS_XATTRS = "fs.capability.paths.xattrs"; + /** + * Probe for support for {@link BatchListingOperations}. 
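A hedged sketch of how a client might combine the capability constant declared just below with BatchListingOperations. The probe against the root path and the fallback behaviour are assumptions, not part of the patch, and the accessors of PartialListing are not shown in this excerpt.

```java
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.BatchListingOperations;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.PartialListing;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

/** Hypothetical probe-then-use pattern for the experimental batch listing API. */
public class BatchListingProbe {
  public static void listBatched(FileSystem fs, List<Path> dirs)
      throws IOException {
    boolean supported =
        fs.hasPathCapability(new Path("/"),
            CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING)
        && fs instanceof BatchListingOperations;
    if (!supported) {
      return; // fall back to per-directory listStatus() calls (not shown)
    }
    RemoteIterator<PartialListing<FileStatus>> it =
        ((BatchListingOperations) fs).batchedListStatusIterator(dirs);
    while (it.hasNext()) {
      PartialListing<FileStatus> partial = it.next();
      // Consume each partial listing here.
    }
  }
}
```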
+ */ + @InterfaceStability.Unstable + public static final String FS_EXPERIMENTAL_BATCH_LISTING = + "fs.capability.batch.listing"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java index cdbd10f636dd3..20e205a8b32cf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java @@ -281,6 +281,21 @@ public int hashCode() { private static final String ALL_HEADER = QUOTA_HEADER + SUMMARY_HEADER; + /** + * Output format:<-------18-------> <----------24----------> + * <----------24---------->. <-------------28------------> SNAPSHOT_LENGTH + * SNAPSHOT_FILE_COUNT SNAPSHOT_DIR_COUNT SNAPSHOT_SPACE_CONSUMED + */ + private static final String SNAPSHOT_FORMAT = "%18s %24s %24s %28s "; + + private static final String[] SNAPSHOT_HEADER_FIELDS = + new String[] {"SNAPSHOT_LENGTH", "SNAPSHOT_FILE_COUNT", + "SNAPSHOT_DIR_COUNT", "SNAPSHOT_SPACE_CONSUMED"}; + + /** The header string. */ + private static final String SNAPSHOT_HEADER = + String.format(SNAPSHOT_FORMAT, (Object[]) SNAPSHOT_HEADER_FIELDS); + /** Return the header of the output. * if qOption is false, output directory count, file count, and content size; @@ -293,7 +308,9 @@ public static String getHeader(boolean qOption) { return qOption ? ALL_HEADER : SUMMARY_HEADER; } - + public static String getSnapshotHeader() { + return SNAPSHOT_HEADER; + } /** * Returns the names of the fields from the summary header. @@ -416,7 +433,7 @@ public String toString(boolean qOption, boolean hOption, boolean tOption, } /** - * Formats a size to be human readable or in bytes + * Formats a size to be human readable or in bytes. * @param size value to be formatted * @param humanReadable flag indicating human readable or not * @return String representation of the size @@ -426,4 +443,17 @@ private String formatSize(long size, boolean humanReadable) { ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1) : String.valueOf(size); } + + /** + * Return the string representation of the snapshot counts in the output + * format. + * @param hOption flag indicating human readable or not + * @return String representation of the snapshot counts + */ + public String toSnapshot(boolean hOption) { + return String.format(SNAPSHOT_FORMAT, formatSize(snapshotLength, hOption), + formatSize(snapshotFileCount, hOption), + formatSize(snapshotDirectoryCount, hOption), + formatSize(snapshotSpaceConsumed, hOption)); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java index 58b5f704bb831..71993713ad2eb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java @@ -123,7 +123,13 @@ public enum CreateFlag { * locality. The first block replica should be placed randomly within the * cluster. Subsequent block replicas should follow DataNode locality rules. */ - IGNORE_CLIENT_LOCALITY((short) 0x100); + IGNORE_CLIENT_LOCALITY((short) 0x100), + + /** + * Advise that a block replica NOT be written to the local rack DataNode where + * 'local' means the same rack as the client is being run on. 
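A minimal sketch of how the NO_LOCAL_RACK flag declared just below might be passed at create time, assuming the existing FileContext.create(Path, EnumSet, CreateOpts...) entry point; whether the underlying store honours the placement hint is backend-dependent, and nothing here is prescribed by the patch.

```java
import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

/** Hypothetical use of the NO_LOCAL_RACK create flag. */
public class NoLocalRackWrite {
  public static void write(FileContext fc, Path path, byte[] data)
      throws IOException {
    // Ask that block replicas avoid the client's local rack; the flag is
    // advisory and the underlying file system may ignore it.
    EnumSet<CreateFlag> flags =
        EnumSet.of(CreateFlag.CREATE, CreateFlag.NO_LOCAL_RACK);
    try (FSDataOutputStream out = fc.create(path, flags)) {
      out.write(data);
    }
  }
}
```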
+ */ + NO_LOCAL_RACK((short) 0x120); private final short mode; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java index a8f294f379158..3a139781e0372 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java @@ -24,13 +24,13 @@ import java.util.Arrays; import java.util.EnumSet; import java.util.List; -import java.util.Set; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Options.ChecksumOpt; +import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; @@ -266,20 +266,17 @@ public List> getDelegationTokens(String renewer) throws IOException { /** * Open a file by delegating to - * {@link FileSystem#openFileWithOptions(Path, Set, Configuration, int)}. + * {@link FileSystem#openFileWithOptions(Path, org.apache.hadoop.fs.impl.OpenFileParameters)}. * @param path path to the file - * @param mandatoryKeys set of options declared as mandatory. - * @param options options set during the build sequence. - * @param bufferSize buffer size - * @return a future which will evaluate to the opened file. + * @param parameters open file parameters from the builder. + + * @return a future which will evaluate to the opened file. * @throws IOException failure to resolve the link.
* @throws IllegalArgumentException unknown mandatory key */ public CompletableFuture openFileWithOptions(Path path, - Set mandatoryKeys, - Configuration options, - int bufferSize) throws IOException { - return fsImpl.openFileWithOptions(path, mandatoryKeys, options, bufferSize); + final OpenFileParameters parameters) throws IOException { + return fsImpl.openFileWithOptions(path, parameters); } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index b2c1369a9c1fe..ba0064f0813d3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -47,7 +47,7 @@ import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.impl.FutureDataInputStreamBuilderImpl; import org.apache.hadoop.fs.impl.FsLinkResolution; -import org.apache.hadoop.fs.impl.PathCapabilitiesSupport; +import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -483,7 +483,7 @@ public static FileContext getFileContext(final URI defaultFsUri, */ public static FileContext getFileContext(final Configuration aConf) throws UnsupportedFileSystemException { - final URI defaultFsUri = URI.create(aConf.get(FS_DEFAULT_NAME_KEY, + final URI defaultFsUri = URI.create(aConf.getTrimmed(FS_DEFAULT_NAME_KEY, FS_DEFAULT_NAME_DEFAULT)); if ( defaultFsUri.getScheme() != null && !defaultFsUri.getScheme().trim().isEmpty()) { @@ -2924,16 +2924,18 @@ protected FSDataInputStreamBuilder( @Override public CompletableFuture build() throws IOException { final Path absF = fixRelativePart(getPath()); + OpenFileParameters parameters = new OpenFileParameters() + .withMandatoryKeys(getMandatoryKeys()) + .withOptions(getOptions()) + .withBufferSize(getBufferSize()) + .withStatus(getStatus()); return new FSLinkResolver>() { @Override public CompletableFuture next( final AbstractFileSystem fs, final Path p) throws IOException { - return fs.openFileWithOptions(p, - getMandatoryKeys(), - getOptions(), - getBufferSize()); + return fs.openFileWithOptions(p, parameters); } }.resolve(FileContext.this, absF); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 2376c051c99f9..abb31ed869591 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -58,11 +58,13 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.impl.AbstractFSBuilderImpl; import org.apache.hadoop.fs.impl.FutureDataInputStreamBuilderImpl; +import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsCreateModes; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; @@ -130,6 +132,25 @@ * New methods may be marked as 
Unstable or Evolving for their initial release, * as a warning that they are new and may change based on the * experience of use in applications. + * Important note for developers + * + * If you're making changes here to the public API or protected methods, + * you must review the following subclasses and make sure that + * they are filtering/passing through new methods as appropriate. + * + * {@link FilterFileSystem}: methods are passed through. + * {@link ChecksumFileSystem}: checksums are created and + * verified. + * {@code TestHarFileSystem} will need its {@code MustNotImplement} + * interface updated. + * + * There are some external places your changes will break things. + * Do co-ordinate changes here. + * + * HBase: HBoss + * Hive: HiveShim23 + * {@code shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java} + * *****************************************************************/ @SuppressWarnings("DeprecatedIsStillUsed") @InterfaceAudience.Public @@ -257,7 +278,8 @@ public static FileSystem get(Configuration conf) throws IOException { * @return the uri of the default filesystem */ public static URI getDefaultUri(Configuration conf) { - URI uri = URI.create(fixName(conf.get(FS_DEFAULT_NAME_KEY, DEFAULT_FS))); + URI uri = + URI.create(fixName(conf.getTrimmed(FS_DEFAULT_NAME_KEY, DEFAULT_FS))); if (uri.getScheme() == null) { throw new IllegalArgumentException("No scheme in default FS: " + uri); } @@ -2159,24 +2181,19 @@ protected class DirListingIterator implements private DirectoryEntries entries; private int i = 0; - DirListingIterator(Path path) { + DirListingIterator(Path path) throws IOException { this.path = path; + this.entries = listStatusBatch(path, null); } @Override public boolean hasNext() throws IOException { - if (entries == null) { - fetchMore(); - } return i < entries.getEntries().length || entries.hasMore(); } private void fetchMore() throws IOException { - byte[] token = null; - if (entries != null) { - token = entries.getToken(); - } + byte[] token = entries.getToken(); entries = listStatusBatch(path, token); i = 0; } @@ -3391,9 +3408,22 @@ private static FileSystem createFileSystem(URI uri, Configuration conf) Tracer tracer = FsTracer.get(conf); try(TraceScope scope = tracer.newScope("FileSystem#createFileSystem")) { scope.addKVAnnotation("scheme", uri.getScheme()); - Class clazz = getFileSystemClass(uri.getScheme(), conf); - FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf); - fs.initialize(uri, conf); + Class clazz = + getFileSystemClass(uri.getScheme(), conf); + FileSystem fs = ReflectionUtils.newInstance(clazz, conf); + try { + fs.initialize(uri, conf); + } catch (IOException | RuntimeException e) { + // exception raised during initialization. + // log summary at warn and full stack at debug + LOGGER.warn("Failed to initialize fileystem {}: {}", + uri, e.toString()); + LOGGER.debug("Failed to initialize fileystem", e); + // then (robustly) close the FS, so as to invoke any + // cleanup code. + IOUtils.cleanupWithLogger(LOGGER, fs); + throw e; + } return fs; } } @@ -4449,43 +4479,39 @@ public FutureDataInputStreamBuilder openFile(PathHandle pathHandle) * the action of opening the file should begin. * * The base implementation performs a blocking - * call to {@link #open(Path, int)}in this call; + * call to {@link #open(Path, int)} in this call; * the actual outcome is in the returned {@code CompletableFuture}. 
* This avoids having to create some thread pool, while still * setting up the expectation that the {@code get()} call * is needed to evaluate the result. * @param path path to the file - * @param mandatoryKeys set of options declared as mandatory. - * @param options options set during the build sequence. - * @param bufferSize buffer size + * @param parameters open file parameters from the builder. * @return a future which will evaluate to the opened file. * @throws IOException failure to resolve the link. * @throws IllegalArgumentException unknown mandatory key */ protected CompletableFuture openFileWithOptions( final Path path, - final Set mandatoryKeys, - final Configuration options, - final int bufferSize) throws IOException { - AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(mandatoryKeys, + final OpenFileParameters parameters) throws IOException { + AbstractFSBuilderImpl.rejectUnknownMandatoryKeys( + parameters.getMandatoryKeys(), Collections.emptySet(), "for " + path); return LambdaUtils.eval( - new CompletableFuture<>(), () -> open(path, bufferSize)); + new CompletableFuture<>(), () -> + open(path, parameters.getBufferSize())); } /** * Execute the actual open file operation. * The base implementation performs a blocking - * call to {@link #open(Path, int)}in this call; + * call to {@link #open(Path, int)} in this call; * the actual outcome is in the returned {@code CompletableFuture}. * This avoids having to create some thread pool, while still * setting up the expectation that the {@code get()} call * is needed to evaluate the result. * @param pathHandle path to the file - * @param mandatoryKeys set of options declared as mandatory. - * @param options options set during the build sequence. - * @param bufferSize buffer size + * @param parameters open file parameters from the builder. * @return a future which will evaluate to the opened file. * @throws IOException failure to resolve the link. 
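From the application side, the future described above is obtained through the openFile() builder; a minimal usage sketch, where the read length and the positional read are illustrative only and not taken from the patch:

```java
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Hypothetical caller of the asynchronous openFile() builder API. */
public class OpenFileExample {
  public static byte[] readHead(FileSystem fs, Path path, int len)
      throws IOException, InterruptedException, ExecutionException {
    // build() packages the builder's keys and options (now carried by
    // OpenFileParameters) and returns a future; the potentially blocking
    // open only has to complete when the future is evaluated.
    CompletableFuture<FSDataInputStream> future = fs.openFile(path).build();
    try (FSDataInputStream in = future.get()) {
      byte[] buffer = new byte[len];
      in.readFully(0, buffer);
      return buffer;
    }
  }
}
```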
* @throws IllegalArgumentException unknown mandatory key @@ -4494,14 +4520,13 @@ protected CompletableFuture openFileWithOptions( */ protected CompletableFuture openFileWithOptions( final PathHandle pathHandle, - final Set mandatoryKeys, - final Configuration options, - final int bufferSize) throws IOException { - AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(mandatoryKeys, + final OpenFileParameters parameters) throws IOException { + AbstractFSBuilderImpl.rejectUnknownMandatoryKeys( + parameters.getMandatoryKeys(), Collections.emptySet(), ""); CompletableFuture result = new CompletableFuture<>(); try { - result.complete(open(pathHandle, bufferSize)); + result.complete(open(pathHandle, parameters.getBufferSize())); } catch (UnsupportedOperationException tx) { // fail fast here throw tx; @@ -4603,12 +4628,17 @@ protected FSDataInputStreamBuilder( @Override public CompletableFuture build() throws IOException { Optional optionalPath = getOptionalPath(); + OpenFileParameters parameters = new OpenFileParameters() + .withMandatoryKeys(getMandatoryKeys()) + .withOptions(getOptions()) + .withBufferSize(getBufferSize()) + .withStatus(super.getStatus()); // explicit to avoid IDE warnings if(optionalPath.isPresent()) { return getFS().openFileWithOptions(optionalPath.get(), - getMandatoryKeys(), getOptions(), getBufferSize()); + parameters); } else { return getFS().openFileWithOptions(getPathHandle(), - getMandatoryKeys(), getOptions(), getBufferSize()); + parameters); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index 4566686a126fe..7bc93f9bf5db8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -21,16 +21,20 @@ import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.BufferedReader; +import java.io.BufferedWriter; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; +import java.io.OutputStreamWriter; import java.net.InetAddress; import java.net.URI; import java.net.UnknownHostException; import java.nio.charset.Charset; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.StandardCharsets; import java.nio.file.AccessDeniedException; import java.nio.file.FileSystems; import java.nio.file.Files; @@ -38,6 +42,7 @@ import java.util.Enumeration; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -1633,4 +1638,235 @@ public static boolean compareFs(FileSystem srcFs, FileSystem destFs) { // check for ports return srcUri.getPort()==dstUri.getPort(); } + + /** + * Writes bytes to a file. This utility method opens the file for writing, + * creating the file if it does not exist, or overwrites an existing file. All + * bytes in the byte array are written to the file. 
+ * + * @param fs the file system with which to create the file + * @param path the path to the file + * @param bytes the byte array with the bytes to write + * + * @return the file system + * + * @throws NullPointerException if any of the arguments are {@code null} + * @throws IOException if an I/O error occurs creating or writing to the file + */ + public static FileSystem write(final FileSystem fs, final Path path, + final byte[] bytes) throws IOException { + + Objects.requireNonNull(path); + Objects.requireNonNull(bytes); + + try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build()) { + out.write(bytes); + } + + return fs; + } + + /** + * Writes bytes to a file. This utility method opens the file for writing, + * creating the file if it does not exist, or overwrites an existing file. All + * bytes in the byte array are written to the file. + * + * @param fileContext the file context with which to create the file + * @param path the path to the file + * @param bytes the byte array with the bytes to write + * + * @return the file context + * + * @throws NullPointerException if any of the arguments are {@code null} + * @throws IOException if an I/O error occurs creating or writing to the file + */ + public static FileContext write(final FileContext fileContext, + final Path path, final byte[] bytes) throws IOException { + + Objects.requireNonNull(path); + Objects.requireNonNull(bytes); + + try (FSDataOutputStream out = + fileContext.create(path).overwrite(true).build()) { + out.write(bytes); + } + + return fileContext; + } + + /** + * Write lines of text to a file. Each line is a char sequence and is written + * to the file in sequence with each line terminated by the platform's line + * separator, as defined by the system property {@code + * line.separator}. Characters are encoded into bytes using the specified + * charset. This utility method opens the file for writing, creating the file + * if it does not exist, or overwrites an existing file. + * + * @param fs the file system with which to create the file + * @param path the path to the file + * @param lines a Collection to iterate over the char sequences + * @param cs the charset to use for encoding + * + * @return the file system + * + * @throws NullPointerException if any of the arguments are {@code null} + * @throws IOException if an I/O error occurs creating or writing to the file + */ + public static FileSystem write(final FileSystem fs, final Path path, + final Iterable lines, final Charset cs) + throws IOException { + + Objects.requireNonNull(path); + Objects.requireNonNull(lines); + Objects.requireNonNull(cs); + + CharsetEncoder encoder = cs.newEncoder(); + try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build(); + BufferedWriter writer = + new BufferedWriter(new OutputStreamWriter(out, encoder))) { + for (CharSequence line : lines) { + writer.append(line); + writer.newLine(); + } + } + return fs; + } + + /** + * Write lines of text to a file. Each line is a char sequence and is written + * to the file in sequence with each line terminated by the platform's line + * separator, as defined by the system property {@code + * line.separator}. Characters are encoded into bytes using the specified + * charset. This utility method opens the file for writing, creating the file + * if it does not exist, or overwrites an existing file. 
+ * + * @param fileContext the file context with which to create the file + * @param path the path to the file + * @param lines a Collection to iterate over the char sequences + * @param cs the charset to use for encoding + * + * @return the file context + * + * @throws NullPointerException if any of the arguments are {@code null} + * @throws IOException if an I/O error occurs creating or writing to the file + */ + public static FileContext write(final FileContext fileContext, + final Path path, final Iterable lines, + final Charset cs) throws IOException { + + Objects.requireNonNull(path); + Objects.requireNonNull(lines); + Objects.requireNonNull(cs); + + CharsetEncoder encoder = cs.newEncoder(); + try (FSDataOutputStream out = fileContext.create(path).overwrite(true).build(); + BufferedWriter writer = + new BufferedWriter(new OutputStreamWriter(out, encoder))) { + for (CharSequence line : lines) { + writer.append(line); + writer.newLine(); + } + } + return fileContext; + } + + /** + * Write a line of text to a file. Characters are encoded into bytes using the + * specified charset. This utility method opens the file for writing, creating + * the file if it does not exist, or overwrites an existing file. + * + * @param fs the file system with which to create the file + * @param path the path to the file + * @param charseq the char sequence to write to the file + * @param cs the charset to use for encoding + * + * @return the file system + * + * @throws NullPointerException if any of the arguments are {@code null} + * @throws IOException if an I/O error occurs creating or writing to the file + */ + public static FileSystem write(final FileSystem fs, final Path path, + final CharSequence charseq, final Charset cs) throws IOException { + + Objects.requireNonNull(path); + Objects.requireNonNull(charseq); + Objects.requireNonNull(cs); + + CharsetEncoder encoder = cs.newEncoder(); + try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build(); + BufferedWriter writer = + new BufferedWriter(new OutputStreamWriter(out, encoder))) { + writer.append(charseq); + } + return fs; + } + + /** + * Write a line of text to a file. Characters are encoded into bytes using the + * specified charset. This utility method opens the file for writing, creating + * the file if it does not exist, or overwrites an existing file. + * + * @param FileContext the file context with which to create the file + * @param path the path to the file + * @param charseq the char sequence to write to the file + * @param cs the charset to use for encoding + * + * @return the file context + * + * @throws NullPointerException if any of the arguments are {@code null} + * @throws IOException if an I/O error occurs creating or writing to the file + */ + public static FileContext write(final FileContext fs, final Path path, + final CharSequence charseq, final Charset cs) throws IOException { + + Objects.requireNonNull(path); + Objects.requireNonNull(charseq); + Objects.requireNonNull(cs); + + CharsetEncoder encoder = cs.newEncoder(); + try (FSDataOutputStream out = fs.create(path).overwrite(true).build(); + BufferedWriter writer = + new BufferedWriter(new OutputStreamWriter(out, encoder))) { + writer.append(charseq); + } + return fs; + } + + /** + * Write a line of text to a file. Characters are encoded into bytes using + * UTF-8. This utility method opens the file for writing, creating the file if + * it does not exist, or overwrites an existing file. 
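A minimal usage sketch of the FileUtil.write overloads added in this hunk, assuming a local filesystem; the class name and path below are illustrative, not part of the patch:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class FileUtilWriteSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path path = new Path("/tmp/fileutil-write-demo.txt");   // illustrative path

        // Raw bytes: the file is created, or overwritten if it already exists.
        FileUtil.write(fs, path, "raw bytes".getBytes(StandardCharsets.UTF_8));

        // Lines of text with an explicit charset; each line gets a line separator.
        FileUtil.write(fs, path, Arrays.asList("first line", "second line"),
            StandardCharsets.UTF_8);

        // A single char sequence, with and without the UTF-8 convenience overload.
        FileUtil.write(fs, path, "explicit charset", StandardCharsets.UTF_8);
        FileUtil.write(fs, path, "defaults to UTF-8");
      }
    }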
+ * + * @param fs the files system with which to create the file + * @param path the path to the file + * @param charseq the char sequence to write to the file + * + * @return the file system + * + * @throws NullPointerException if any of the arguments are {@code null} + * @throws IOException if an I/O error occurs creating or writing to the file + */ + public static FileSystem write(final FileSystem fs, final Path path, + final CharSequence charseq) throws IOException { + return write(fs, path, charseq, StandardCharsets.UTF_8); + } + + /** + * Write a line of text to a file. Characters are encoded into bytes using + * UTF-8. This utility method opens the file for writing, creating the file if + * it does not exist, or overwrites an existing file. + * + * @param fileContext the files system with which to create the file + * @param path the path to the file + * @param charseq the char sequence to write to the file + * + * @return the file context + * + * @throws NullPointerException if any of the arguments are {@code null} + * @throws IOException if an I/O error occurs creating or writing to the file + */ + public static FileContext write(final FileContext fileContext, + final Path path, final CharSequence charseq) throws IOException { + return write(fileContext, path, charseq, StandardCharsets.UTF_8); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index 3bc3cb2e9b07a..cf12ea3898a7f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -25,12 +25,12 @@ import java.util.EnumSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -714,20 +714,15 @@ public FutureDataInputStreamBuilder openFile(final PathHandle pathHandle) @Override protected CompletableFuture openFileWithOptions( final Path path, - final Set mandatoryKeys, - final Configuration options, - final int bufferSize) throws IOException { - return fs.openFileWithOptions(path, mandatoryKeys, options, bufferSize); + final OpenFileParameters parameters) throws IOException { + return fs.openFileWithOptions(path, parameters); } @Override protected CompletableFuture openFileWithOptions( final PathHandle pathHandle, - final Set mandatoryKeys, - final Configuration options, - final int bufferSize) throws IOException { - return fs.openFileWithOptions(pathHandle, mandatoryKeys, options, - bufferSize); + final OpenFileParameters parameters) throws IOException { + return fs.openFileWithOptions(pathHandle, parameters); } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java index 731a52a7b4137..e197506edc88b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java @@ -26,13 +26,12 @@ import java.util.EnumSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem.Statistics; +import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -440,10 +439,8 @@ public Collection getAllStoragePolicies() @Override public CompletableFuture openFileWithOptions( final Path path, - final Set mandatoryKeys, - final Configuration options, - final int bufferSize) throws IOException { - return myFs.openFileWithOptions(path, mandatoryKeys, options, bufferSize); + final OpenFileParameters parameters) throws IOException { + return myFs.openFileWithOptions(path, parameters); } public boolean hasPathCapability(final Path path, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java index cfef1c3827917..07c16b22358c1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java @@ -42,4 +42,6 @@ public interface FsConstants { */ public static final URI VIEWFS_URI = URI.create("viewfs:///"); public static final String VIEWFS_SCHEME = "viewfs"; + String FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN = + "fs.viewfs.overload.scheme.target.%s.impl"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FutureDataInputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FutureDataInputStreamBuilder.java index 774d30927df2c..27a522e593001 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FutureDataInputStreamBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FutureDataInputStreamBuilder.java @@ -47,4 +47,15 @@ public interface FutureDataInputStreamBuilder CompletableFuture build() throws IllegalArgumentException, UnsupportedOperationException, IOException; + + /** + * A FileStatus may be provided to the open request. + * It is up to the implementation whether to use this or not. + * @param status status. + * @return the builder. 
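A sketch of how a caller might combine the new withFileStatus() hint with the existing openFile() builder; the path and the up-front getFileStatus() call are illustrative, and implementations are free to ignore the hint:

    import java.util.concurrent.CompletableFuture;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class OpenFileWithStatusSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path path = new Path("/tmp/open-file-demo.txt");        // illustrative path

        // A status already in hand (e.g. from a directory listing) can be passed
        // down so the filesystem does not have to look it up again.
        FileStatus status = fs.getFileStatus(path);

        CompletableFuture<FSDataInputStream> future = fs.openFile(path)
            .withFileStatus(status)      // optional hint added by this patch
            .build();

        try (FSDataInputStream in = future.get()) {
          in.read();
        }
      }
    }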
+ */ + default FutureDataInputStreamBuilder withFileStatus(FileStatus status) { + return this; + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java index a6b37b32bb564..1f0a06d7dd98f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.fs; -import com.google.protobuf.ByteString; +import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto; import java.io.IOException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java new file mode 100644 index 0000000000000..043f84612dc8b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import com.google.common.base.Preconditions; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.ipc.RemoteException; + +import java.io.IOException; +import java.util.List; + +/** + * A partial listing of the children of a parent directory. Since it is a + * partial listing, multiple PartialListing may need to be combined to obtain + * the full listing of a parent directory. + *

+ * ListingBatch behaves similar to a Future, in that getting the result via + * {@link #get()} will throw an Exception if there was a failure. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class PartialListing { + private final Path listedPath; + private final List partialListing; + private final RemoteException exception; + + public PartialListing(Path listedPath, List partialListing) { + this(listedPath, partialListing, null); + } + + public PartialListing(Path listedPath, RemoteException exception) { + this(listedPath, null, exception); + } + + private PartialListing(Path listedPath, List partialListing, + RemoteException exception) { + Preconditions.checkArgument(partialListing == null ^ exception == null); + this.partialListing = partialListing; + this.listedPath = listedPath; + this.exception = exception; + } + + /** + * Partial listing of the path being listed. In the case where the path is + * a file. The list will be a singleton with the file itself. + * + * @return Partial listing of the path being listed. + * @throws IOException if there was an exception getting the listing. + */ + public List get() throws IOException { + if (exception != null) { + throw exception.unwrapRemoteException(); + } + return partialListing; + } + + /** + * Path being listed. + * + * @return the path being listed. + */ + public Path getListedPath() { + return listedPath; + } + + @Override + public String toString() { + return new ToStringBuilder(this) + .append("listedPath", listedPath) + .append("partialListing", partialListing) + .append("exception", exception) + .toString(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java index 3472362dc4792..11cc93401748e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java @@ -40,14 +40,13 @@ public class QuotaUsage { /** Builder class for QuotaUsage. */ public static class Builder { public Builder() { - this.quota = -1; - this.spaceQuota = -1; + this.quota = -1L; + this.spaceQuota = -1L; typeConsumed = new long[StorageType.values().length]; typeQuota = new long[StorageType.values().length]; - for (int i = 0; i < typeQuota.length; i++) { - typeQuota[i] = -1; - } + + Arrays.fill(typeQuota, -1L); } public Builder fileAndDirectoryCount(long count) { @@ -71,9 +70,8 @@ public Builder spaceQuota(long spaceQuota) { } public Builder typeConsumed(long[] typeConsumed) { - for (int i = 0; i < typeConsumed.length; i++) { - this.typeConsumed[i] = typeConsumed[i]; - } + System.arraycopy(typeConsumed, 0, this.typeConsumed, 0, + typeConsumed.length); return this; } @@ -88,9 +86,7 @@ public Builder typeConsumed(StorageType type, long consumed) { } public Builder typeQuota(long[] typeQuota) { - for (int i = 0; i < typeQuota.length; i++) { - this.typeQuota[i] = typeQuota[i]; - } + System.arraycopy(typeQuota, 0, this.typeQuota, 0, typeQuota.length); return this; } @@ -153,32 +149,21 @@ public long getSpaceQuota() { /** Return storage type quota. */ public long getTypeQuota(StorageType type) { - return (typeQuota != null) ? typeQuota[type.ordinal()] : -1; + return (typeQuota != null) ? typeQuota[type.ordinal()] : -1L; } /** Return storage type consumed. */ public long getTypeConsumed(StorageType type) { - return (typeConsumed != null) ? 
typeConsumed[type.ordinal()] : 0; - } - - /** Return storage type quota. */ - private long[] getTypesQuota() { - return typeQuota; - } - - /** Return storage type quota. */ - private long[] getTypesConsumed() { - return typeConsumed; + return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0L; } /** Return true if any storage type quota has been set. */ public boolean isTypeQuotaSet() { - if (typeQuota == null) { - return false; - } - for (StorageType t : StorageType.getTypesSupportingQuota()) { - if (typeQuota[t.ordinal()] > 0) { - return true; + if (typeQuota != null) { + for (StorageType t : StorageType.getTypesSupportingQuota()) { + if (typeQuota[t.ordinal()] > 0L) { + return true; + } } } return false; @@ -186,45 +171,58 @@ public boolean isTypeQuotaSet() { /** Return true if any storage type consumption information is available. */ public boolean isTypeConsumedAvailable() { - if (typeConsumed == null) { - return false; - } - for (StorageType t : StorageType.getTypesSupportingQuota()) { - if (typeConsumed[t.ordinal()] > 0) { - return true; + if (typeConsumed != null) { + for (StorageType t : StorageType.getTypesSupportingQuota()) { + if (typeConsumed[t.ordinal()] > 0L) { + return true; + } } } return false; } @Override - public boolean equals(Object to) { - return (this == to || (to instanceof QuotaUsage && - getFileAndDirectoryCount() == - ((QuotaUsage) to).getFileAndDirectoryCount() && - getQuota() == ((QuotaUsage) to).getQuota() && - getSpaceConsumed() == ((QuotaUsage) to).getSpaceConsumed() && - getSpaceQuota() == ((QuotaUsage) to).getSpaceQuota() && - Arrays.equals(getTypesQuota(), ((QuotaUsage) to).getTypesQuota()) && - Arrays.equals(getTypesConsumed(), - ((QuotaUsage) to).getTypesConsumed()))); + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + (int) (fileAndDirectoryCount ^ (fileAndDirectoryCount >>> 32)); + result = prime * result + (int) (quota ^ (quota >>> 32)); + result = prime * result + (int) (spaceConsumed ^ (spaceConsumed >>> 32)); + result = prime * result + (int) (spaceQuota ^ (spaceQuota >>> 32)); + result = prime * result + Arrays.hashCode(typeConsumed); + result = prime * result + Arrays.hashCode(typeQuota); + return result; } @Override - public int hashCode() { - long result = (getFileAndDirectoryCount() ^ getQuota() ^ - getSpaceConsumed() ^ getSpaceQuota()); - if (getTypesQuota() != null) { - for (long quota : getTypesQuota()) { - result ^= quota; - } + public boolean equals(Object obj) { + if (this == obj) { + return true; } - if (getTypesConsumed() != null) { - for (long consumed : getTypesConsumed()) { - result ^= consumed; - } + if (!(obj instanceof QuotaUsage)) { + return false; + } + QuotaUsage other = (QuotaUsage) obj; + if (fileAndDirectoryCount != other.fileAndDirectoryCount) { + return false; + } + if (quota != other.quota) { + return false; + } + if (spaceConsumed != other.spaceConsumed) { + return false; + } + if (spaceQuota != other.spaceQuota) { + return false; + } + if (!Arrays.equals(typeConsumed, other.typeConsumed)) { + return false; + } + if (!Arrays.equals(typeQuota, other.typeQuota)) { + return false; } - return (int)result; + return true; } /** @@ -292,11 +290,11 @@ protected String getQuotaUsage(boolean hOption) { String spaceQuotaStr = QUOTA_NONE; String spaceQuotaRem = QUOTA_INF; - if (quota > 0) { + if (quota > 0L) { quotaStr = formatSize(quota, hOption); quotaRem = formatSize(quota-fileAndDirectoryCount, hOption); } - if (spaceQuota >= 0) { + if (spaceQuota >= 0L) { spaceQuotaStr = 
formatSize(spaceQuota, hOption); spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption); } @@ -307,20 +305,20 @@ protected String getQuotaUsage(boolean hOption) { protected String getTypesQuotaUsage(boolean hOption, List types) { - StringBuffer content = new StringBuffer(); + StringBuilder content = new StringBuilder(); for (StorageType st : types) { long typeQuota = getTypeQuota(st); long typeConsumed = getTypeConsumed(st); String quotaStr = QUOTA_NONE; String quotaRem = QUOTA_INF; - if (typeQuota >= 0) { + if (typeQuota >= 0L) { quotaStr = formatSize(typeQuota, hOption); quotaRem = formatSize(typeQuota - typeConsumed, hOption); } - content.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT, - quotaStr, quotaRem)); + content.append( + String.format(STORAGE_TYPE_SUMMARY_FORMAT, quotaStr, quotaRem)); } return content.toString(); } @@ -332,7 +330,7 @@ protected String getTypesQuotaUsage(boolean hOption, * @return storage header string */ public static String getStorageTypeHeader(List storageTypes) { - StringBuffer header = new StringBuffer(); + StringBuilder header = new StringBuilder(); for (StorageType st : storageTypes) { /* the field length is 13/17 for quota and remain quota diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java index 4b144bfddf6c6..28db2c9a1a227 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java @@ -20,6 +20,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.net.ConnectException; import java.net.URI; @@ -41,6 +42,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.Progressable; import org.slf4j.Logger; @@ -110,7 +112,9 @@ public void initialize(URI uri, Configuration conf) throws IOException { // get // get port information from uri, (overrides info in conf) int port = uri.getPort(); - port = (port == -1) ? FTP.DEFAULT_PORT : port; + if(port == -1){ + port = conf.getInt(FS_FTP_HOST_PORT, FTP.DEFAULT_PORT); + } conf.setInt(FS_FTP_HOST_PORT, port); // get user/password information from URI (overrides info in conf) @@ -340,8 +344,19 @@ public FSDataOutputStream create(Path file, FsPermission permission, // file. The FTP client connection is closed when close() is called on the // FSDataOutputStream. client.changeWorkingDirectory(parent.toUri().getPath()); - FSDataOutputStream fos = new FSDataOutputStream(client.storeFileStream(file - .getName()), statistics) { + OutputStream outputStream = client.storeFileStream(file.getName()); + + if (!FTPReply.isPositivePreliminary(client.getReplyCode())) { + // The ftpClient is an inconsistent state. 
Must close the stream + // which in turn will logout and disconnect from FTP server + if (outputStream != null) { + IOUtils.closeStream(outputStream); + } + disconnect(client); + throw new IOException("Unable to create file: " + file + ", Aborting"); + } + + FSDataOutputStream fos = new FSDataOutputStream(outputStream, statistics) { @Override public void close() throws IOException { super.close(); @@ -356,12 +371,6 @@ public void close() throws IOException { } } }; - if (!FTPReply.isPositivePreliminary(client.getReplyCode())) { - // The ftpClient is an inconsistent state. Must close the stream - // which in turn will logout and disconnect from FTP server - fos.close(); - throw new IOException("Unable to create file: " + file + ", Aborting"); - } return fos; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureDataInputStreamBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureDataInputStreamBuilderImpl.java index 2aa4a5d95fcc7..24a8d49747fe6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureDataInputStreamBuilderImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureDataInputStreamBuilderImpl.java @@ -26,12 +26,13 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FutureDataInputStreamBuilder; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathHandle; -import static com.google.common.base.Preconditions.checkNotNull; +import static java.util.Objects.requireNonNull; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; @@ -60,6 +61,12 @@ public abstract class FutureDataInputStreamBuilderImpl private int bufferSize; + /** + * File status passed in through a {@link #withFileStatus(FileStatus)} + * call; null otherwise. + */ + private FileStatus status; + /** * Construct from a {@link FileContext}. * @@ -69,8 +76,8 @@ public abstract class FutureDataInputStreamBuilderImpl */ protected FutureDataInputStreamBuilderImpl(@Nonnull FileContext fc, @Nonnull Path path) throws IOException { - super(checkNotNull(path)); - checkNotNull(fc); + super(requireNonNull(path, "path")); + requireNonNull(fc, "file context"); this.fileSystem = null; bufferSize = IO_FILE_BUFFER_SIZE_DEFAULT; } @@ -82,8 +89,8 @@ protected FutureDataInputStreamBuilderImpl(@Nonnull FileContext fc, */ protected FutureDataInputStreamBuilderImpl(@Nonnull FileSystem fileSystem, @Nonnull Path path) { - super(checkNotNull(path)); - this.fileSystem = checkNotNull(fileSystem); + super(requireNonNull(path, "path")); + this.fileSystem = requireNonNull(fileSystem, "fileSystem"); initFromFS(); } @@ -108,7 +115,7 @@ private void initFromFS() { } protected FileSystem getFS() { - checkNotNull(fileSystem); + requireNonNull(fileSystem, "fileSystem"); return fileSystem; } @@ -138,4 +145,18 @@ public FutureDataInputStreamBuilder builder() { public FutureDataInputStreamBuilder getThisBuilder() { return this; } + + @Override + public FutureDataInputStreamBuilder withFileStatus(FileStatus st) { + this.status = requireNonNull(st, "status"); + return this; + } + + /** + * Get any status set in {@link #withFileStatus(FileStatus)}. 
+ * @return a status value or null. + */ + protected FileStatus getStatus() { + return status; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/OpenFileParameters.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/OpenFileParameters.java new file mode 100644 index 0000000000000..77b4ff52696a3 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/OpenFileParameters.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.impl; + +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; + +import static java.util.Objects.requireNonNull; + +/** + * All the parameters from the openFile builder for the + * {@code openFileWithOptions} commands. + * + * If/when new attributes added to the builder, this class will be extended. + */ +public class OpenFileParameters { + + /** + * Set of options declared as mandatory. + */ + private Set mandatoryKeys; + + /** + * Options set during the build sequence. + */ + private Configuration options; + + /** + * Buffer size. + */ + private int bufferSize; + + /** + * Optional file status. 
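A sketch of consuming OpenFileParameters on the filesystem side: a hypothetical subclass overrides the protected openFileWithOptions() and reads the aggregated parameters instead of the old discrete arguments (the subclass name and the println are illustrative only):

    import java.io.IOException;
    import java.util.concurrent.CompletableFuture;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RawLocalFileSystem;
    import org.apache.hadoop.fs.impl.OpenFileParameters;

    public class StatusAwareLocalFileSystem extends RawLocalFileSystem {
      @Override
      protected CompletableFuture<FSDataInputStream> openFileWithOptions(
          final Path path,
          final OpenFileParameters parameters) throws IOException {
        // Null unless the caller used withFileStatus(); a remote store could use it
        // to skip a redundant getFileStatus()/HEAD request before opening.
        FileStatus status = parameters.getStatus();
        if (status != null) {
          System.out.println("opening " + path + " with known length "
              + status.getLen());
        }
        return super.openFileWithOptions(path, parameters);
      }
    }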
+ */ + private FileStatus status; + + public OpenFileParameters() { + } + + public OpenFileParameters withMandatoryKeys(final Set keys) { + this.mandatoryKeys = requireNonNull(keys); + return this; + } + + public OpenFileParameters withOptions(final Configuration opts) { + this.options = requireNonNull(opts); + return this; + } + + public OpenFileParameters withBufferSize(final int size) { + this.bufferSize = size; + return this; + } + + public OpenFileParameters withStatus(final FileStatus st) { + this.status = st; + return this; + } + + public Set getMandatoryKeys() { + return mandatoryKeys; + } + + public Configuration getOptions() { + return options; + } + + public int getBufferSize() { + return bufferSize; + } + + public FileStatus getStatus() { + return status; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java index ed33357b51d2b..a91b50f2e9fa7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java @@ -19,7 +19,6 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.net.URLDecoder; @@ -516,20 +515,21 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException { disconnect(channel); throw new IOException(String.format(E_PATH_DIR, f)); } - InputStream is; try { // the path could be a symbolic link, so get the real path absolute = new Path("/", channel.realpath(absolute.toUri().getPath())); - - is = channel.get(absolute.toUri().getPath()); } catch (SftpException e) { throw new IOException(e); } - return new FSDataInputStream(new SFTPInputStream(is, statistics)){ + return new FSDataInputStream( + new SFTPInputStream(channel, absolute, statistics)){ @Override public void close() throws IOException { - super.close(); - disconnect(channel); + try { + super.close(); + } finally { + disconnect(channel); + } } }; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java index 7af299bd113e1..d0f9a8d0887ca 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java @@ -15,62 +15,107 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package org.apache.hadoop.fs.sftp; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import com.jcraft.jsch.ChannelSftp; +import com.jcraft.jsch.SftpATTRS; +import com.jcraft.jsch.SftpException; + +import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; /** SFTP FileSystem input stream. 
*/ class SFTPInputStream extends FSInputStream { - public static final String E_SEEK_NOTSUPPORTED = "Seek not supported"; - public static final String E_NULL_INPUTSTREAM = "Null InputStream"; - public static final String E_STREAM_CLOSED = "Stream closed"; - + private final ChannelSftp channel; + private final Path path; private InputStream wrappedStream; private FileSystem.Statistics stats; private boolean closed; private long pos; + private long nextPos; + private long contentLength; - SFTPInputStream(InputStream stream, FileSystem.Statistics stats) { - - if (stream == null) { - throw new IllegalArgumentException(E_NULL_INPUTSTREAM); + SFTPInputStream(ChannelSftp channel, Path path, FileSystem.Statistics stats) + throws IOException { + try { + this.channel = channel; + this.path = path; + this.stats = stats; + this.wrappedStream = channel.get(path.toUri().getPath()); + SftpATTRS stat = channel.lstat(path.toString()); + this.contentLength = stat.getSize(); + } catch (SftpException e) { + throw new IOException(e); } - this.wrappedStream = stream; - this.stats = stats; + } - this.pos = 0; - this.closed = false; + @Override + public synchronized void seek(long position) throws IOException { + checkNotClosed(); + if (position < 0) { + throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK); + } + nextPos = position; } @Override - public void seek(long position) throws IOException { - throw new IOException(E_SEEK_NOTSUPPORTED); + public synchronized int available() throws IOException { + checkNotClosed(); + long remaining = contentLength - nextPos; + if (remaining > Integer.MAX_VALUE) { + return Integer.MAX_VALUE; + } + return (int) remaining; + } + + private void seekInternal() throws IOException { + if (pos == nextPos) { + return; + } + if (nextPos > pos) { + long skipped = wrappedStream.skip(nextPos - pos); + pos = pos + skipped; + } + if (nextPos < pos) { + wrappedStream.close(); + try { + wrappedStream = channel.get(path.toUri().getPath()); + pos = wrappedStream.skip(nextPos); + } catch (SftpException e) { + throw new IOException(e); + } + } } @Override public boolean seekToNewSource(long targetPos) throws IOException { - throw new IOException(E_SEEK_NOTSUPPORTED); + return false; } @Override - public long getPos() throws IOException { - return pos; + public synchronized long getPos() throws IOException { + return nextPos; } @Override public synchronized int read() throws IOException { - if (closed) { - throw new IOException(E_STREAM_CLOSED); + checkNotClosed(); + if (this.contentLength == 0 || (nextPos >= contentLength)) { + return -1; } - + seekInternal(); int byteRead = wrappedStream.read(); if (byteRead >= 0) { pos++; + nextPos++; } if (stats != null & byteRead >= 0) { stats.incrementBytesRead(1); @@ -78,23 +123,6 @@ public synchronized int read() throws IOException { return byteRead; } - public synchronized int read(byte[] buf, int off, int len) - throws IOException { - if (closed) { - throw new IOException(E_STREAM_CLOSED); - } - - int result = wrappedStream.read(buf, off, len); - if (result > 0) { - pos += result; - } - if (stats != null & result > 0) { - stats.incrementBytesRead(result); - } - - return result; - } - public synchronized void close() throws IOException { if (closed) { return; @@ -103,4 +131,12 @@ public synchronized void close() throws IOException { wrappedStream.close(); closed = true; } + + private void checkNotClosed() throws IOException { + if (closed) { + throw new IOException( + path.toUri() + ": " + FSExceptionMessages.STREAM_IS_CLOSED + ); + } + } } diff 
--git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java index 0802a00b01bc8..ca9961aeb65a0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java @@ -415,7 +415,6 @@ protected void copyStreamToTarget(InputStream in, PathData target) targetFs.setWriteChecksum(writeChecksum); targetFs.writeStreamToFile(in, tempTarget, lazyPersist, direct); if (!direct) { - targetFs.deleteOnExit(tempTarget.path); targetFs.rename(tempTarget, target); } } finally { @@ -491,25 +490,18 @@ void writeStreamToFile(InputStream in, PathData target, throws IOException { FSDataOutputStream out = null; try { - out = create(target, lazyPersist, direct); + out = create(target, lazyPersist); IOUtils.copyBytes(in, out, getConf(), true); - } catch (IOException e) { - // failure: clean up if we got as far as creating the file - if (!direct && out != null) { - try { - fs.delete(target.path, false); - } catch (IOException ignored) { - } - } - throw e; } finally { + if (!direct) { + deleteOnExit(target.path); + } IOUtils.closeStream(out); // just in case copyBytes didn't } } // tag created files as temp files - FSDataOutputStream create(PathData item, boolean lazyPersist, - boolean direct) + FSDataOutputStream create(PathData item, boolean lazyPersist) throws IOException { if (lazyPersist) { long defaultBlockSize; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java index 4622c75fbd410..39958a9cb1c9d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java @@ -239,26 +239,35 @@ protected void processOptions(LinkedList args) * Copy local files to a remote filesystem */ public static class Put extends CommandWithDestination { + private ThreadPoolExecutor executor = null; + private int numThreads = 1; + + private static final int MAX_THREADS = + Runtime.getRuntime().availableProcessors() * 2; + public static final String NAME = "put"; public static final String USAGE = - "[-f] [-p] [-l] [-d] ... "; + "[-f] [-p] [-l] [-d] [-t ] ... "; public static final String DESCRIPTION = - "Copy files from the local file system " + - "into fs. Copying fails if the file already " + - "exists, unless the -f flag is given.\n" + - "Flags:\n" + - " -p : Preserves access and modification times, ownership and the mode.\n" + - " -f : Overwrites the destination if it already exists.\n" + - " -l : Allow DataNode to lazily persist the file to disk. Forces\n" + - " replication factor of 1. This flag will result in reduced\n" + - " durability. Use with care.\n" + + "Copy files from the local file system " + + "into fs. Copying fails if the file already " + + "exists, unless the -f flag is given.\n" + + "Flags:\n" + + " -p : Preserves timestamps, ownership and the mode.\n" + + " -f : Overwrites the destination if it already exists.\n" + + " -t : Number of threads to be used, default is 1.\n" + + " -l : Allow DataNode to lazily persist the file to disk. Forces" + + " replication factor of 1. 
This flag will result in reduced" + + " durability. Use with care.\n" + " -d : Skip creation of temporary file(._COPYING_).\n"; @Override protected void processOptions(LinkedList args) throws IOException { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d"); + cf.addOptionWithValue("t"); cf.parse(args); + setNumberThreads(cf.getOptValue("t")); setOverwrite(cf.getOpt("f")); setPreserve(cf.getOpt("p")); setLazyPersist(cf.getOpt("l")); @@ -288,32 +297,22 @@ protected void processArguments(LinkedList args) copyStreamToTarget(System.in, getTargetPath(args.get(0))); return; } - super.processArguments(args); - } - } - public static class CopyFromLocal extends Put { - private ThreadPoolExecutor executor = null; - private int numThreads = 1; + executor = new ThreadPoolExecutor(numThreads, numThreads, 1, + TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024), + new ThreadPoolExecutor.CallerRunsPolicy()); + super.processArguments(args); - private static final int MAX_THREADS = - Runtime.getRuntime().availableProcessors() * 2; - public static final String NAME = "copyFromLocal"; - public static final String USAGE = - "[-f] [-p] [-l] [-d] [-t ] ... "; - public static final String DESCRIPTION = - "Copy files from the local file system " + - "into fs. Copying fails if the file already " + - "exists, unless the -f flag is given.\n" + - "Flags:\n" + - " -p : Preserves access and modification times, ownership and the" + - " mode.\n" + - " -f : Overwrites the destination if it already exists.\n" + - " -t : Number of threads to be used, default is 1.\n" + - " -l : Allow DataNode to lazily persist the file to disk. Forces" + - " replication factor of 1. This flag will result in reduced" + - " durability. Use with care.\n" + - " -d : Skip creation of temporary file(._COPYING_).\n"; + // issue the command and then wait for it to finish + executor.shutdown(); + try { + executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES); + } catch (InterruptedException e) { + executor.shutdownNow(); + displayError(e); + Thread.currentThread().interrupt(); + } + } private void setNumberThreads(String numberThreadsString) { if (numberThreadsString == null) { @@ -330,22 +329,6 @@ private void setNumberThreads(String numberThreadsString) { } } - @Override - protected void processOptions(LinkedList args) throws IOException { - CommandFormat cf = - new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d"); - cf.addOptionWithValue("t"); - cf.parse(args); - setNumberThreads(cf.getOptValue("t")); - setOverwrite(cf.getOpt("f")); - setPreserve(cf.getOpt("p")); - setLazyPersist(cf.getOpt("l")); - setDirectWrite(cf.getOpt("d")); - getRemoteDestination(args); - // should have a -r option - setRecursive(true); - } - private void copyFile(PathData src, PathData target) throws IOException { if (isPathRecursable(src)) { throw new PathIsDirectoryException(src.toString()); @@ -372,25 +355,6 @@ protected void copyFileToTarget(PathData src, PathData target) executor.submit(task); } - @Override - protected void processArguments(LinkedList args) - throws IOException { - executor = new ThreadPoolExecutor(numThreads, numThreads, 1, - TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024), - new ThreadPoolExecutor.CallerRunsPolicy()); - super.processArguments(args); - - // issue the command and then wait for it to finish - executor.shutdown(); - try { - executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES); - } catch (InterruptedException e) { - executor.shutdownNow(); - displayError(e); - 
Thread.currentThread().interrupt(); - } - } - @VisibleForTesting public int getNumThreads() { return numThreads; @@ -401,6 +365,12 @@ public ThreadPoolExecutor getExecutor() { return executor; } } + + public static class CopyFromLocal extends Put { + public static final String NAME = "copyFromLocal"; + public static final String USAGE = Put.USAGE; + public static final String DESCRIPTION = "Identical to the -put command."; + } public static class CopyToLocal extends Get { public static final String NAME = "copyToLocal"; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java index 22d8be53e97a6..ab7e1951bcd3f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java @@ -56,13 +56,14 @@ public static void registerCommands(CommandFactory factory) { //return the quota, namespace count and disk space usage. private static final String OPTION_QUOTA_AND_USAGE = "u"; private static final String OPTION_ECPOLICY = "e"; + private static final String OPTION_SNAPSHOT_COUNT = "s"; public static final String NAME = "count"; public static final String USAGE = "[-" + OPTION_QUOTA + "] [-" + OPTION_HUMAN + "] [-" + OPTION_HEADER + "] [-" + OPTION_TYPE + " []] [-" + OPTION_QUOTA_AND_USAGE + "] [-" + OPTION_EXCLUDE_SNAPSHOT - + "] [-" + OPTION_ECPOLICY + + "] [-" + OPTION_ECPOLICY + "] [-" + OPTION_SNAPSHOT_COUNT + "] ..."; public static final String DESCRIPTION = "Count the number of directories, files and bytes under the paths\n" + @@ -93,7 +94,8 @@ public static void registerCommands(CommandFactory factory) { "the storage types.\n" + "The -" + OPTION_QUOTA_AND_USAGE + " option shows the quota and \n" + "the usage against the quota without the detailed content summary."+ - "The -"+ OPTION_ECPOLICY +" option shows the erasure coding policy."; + "The -" + OPTION_ECPOLICY + " option shows the erasure coding policy." 
+ + "The -" + OPTION_SNAPSHOT_COUNT + " option shows snapshot counts."; private boolean showQuotas; private boolean humanReadable; @@ -102,6 +104,7 @@ public static void registerCommands(CommandFactory factory) { private boolean showQuotasAndUsageOnly; private boolean excludeSnapshots; private boolean displayECPolicy; + private boolean showSnapshot; /** Constructor */ public Count() {} @@ -123,7 +126,7 @@ protected void processOptions(LinkedList args) { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER, OPTION_QUOTA_AND_USAGE, OPTION_EXCLUDE_SNAPSHOT, - OPTION_ECPOLICY); + OPTION_ECPOLICY, OPTION_SNAPSHOT_COUNT); cf.addOptionWithValue(OPTION_TYPE); cf.parse(args); if (args.isEmpty()) { // default path is the current working directory @@ -134,6 +137,7 @@ protected void processOptions(LinkedList args) { showQuotasAndUsageOnly = cf.getOpt(OPTION_QUOTA_AND_USAGE); excludeSnapshots = cf.getOpt(OPTION_EXCLUDE_SNAPSHOT); displayECPolicy = cf.getOpt(OPTION_ECPOLICY); + showSnapshot = cf.getOpt(OPTION_SNAPSHOT_COUNT); if (showQuotas || showQuotasAndUsageOnly) { String types = cf.getOptValue(OPTION_TYPE); @@ -165,6 +169,9 @@ protected void processOptions(LinkedList args) { if(displayECPolicy){ headString.append("ERASURECODING_POLICY "); } + if (showSnapshot) { + headString.append(ContentSummary.getSnapshotHeader()); + } headString.append("PATHNAME"); out.println(headString.toString()); } @@ -205,6 +212,10 @@ protected void processPath(PathData src) throws IOException { outputString.append(summary.getErasureCodingPolicy()) .append(" "); } + if (showSnapshot) { + ContentSummary summary = src.fs.getContentSummary(src.path); + outputString.append(summary.toSnapshot(isHumanReadable())); + } outputString.append(src); out.println(outputString.toString()); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java index 784bbf33f7826..ea8378dc4551b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java @@ -108,7 +108,7 @@ protected void processRawArguments(LinkedList args) HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY, HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT); if (displayWarnings) { - final String defaultFs = getConf().get(FS_DEFAULT_NAME_KEY); + final String defaultFs = getConf().getTrimmed(FS_DEFAULT_NAME_KEY); final boolean missingDefaultFs = defaultFs == null || defaultFs.equals(FS_DEFAULT_NAME_DEFAULT); if (missingDefaultFs) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java index 5ef42775ea58b..c20293e1a5adb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.PathExistsException; -import org.apache.hadoop.fs.shell.CopyCommands.Put; +import org.apache.hadoop.fs.shell.CopyCommands.CopyFromLocal; /** Various commands for moving files */ @InterfaceAudience.Private @@ -41,12 +41,22 @@ public static void 
registerCommands(CommandFactory factory) { /** * Move local files to a remote filesystem */ - public static class MoveFromLocal extends Put { + public static class MoveFromLocal extends CopyFromLocal { public static final String NAME = "moveFromLocal"; - public static final String USAGE = " ... "; + public static final String USAGE = + "[-f] [-p] [-l] [-d] ... "; public static final String DESCRIPTION = - "Same as -put, except that the source is " + - "deleted after it's copied."; + "Same as -put, except that the source is " + + "deleted after it's copied\n" + + "and -t option has not yet implemented."; + + @Override + protected void processOptions(LinkedList args) throws IOException { + if(args.contains("-t")) { + throw new CommandFormat.UnknownOptionException("-t"); + } + super.processOptions(args); + } @Override protected void processPath(PathData src, PathData target) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java index 4c3dae9a9f99b..6dd1f6589478e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java @@ -135,6 +135,17 @@ public static void addLinkMerge(Configuration conf, final URI[] targets) { addLinkMerge(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, targets); } + /** + * Add nfly link to configuration for the given mount table. + */ + public static void addLinkNfly(Configuration conf, String mountTableName, + String src, String settings, final String targets) { + conf.set( + getConfigViewFsPrefix(mountTableName) + "." + + Constants.CONFIG_VIEWFS_LINK_NFLY + "." + settings + "." + src, + targets); + } + /** * * @param conf @@ -149,9 +160,7 @@ public static void addLinkNfly(Configuration conf, String mountTableName, settings = settings == null ? "minReplication=2,repairOnRead=true" : settings; - - conf.set(getConfigViewFsPrefix(mountTableName) + "." + - Constants.CONFIG_VIEWFS_LINK_NFLY + "." + settings + "." + src, + addLinkNfly(conf, mountTableName, src, settings, StringUtils.uriToString(targets)); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java index 37f1a16800e7d..0a5d4b46ce2d8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java @@ -30,6 +30,11 @@ public interface Constants { * Prefix for the config variable prefix for the ViewFs mount-table */ public static final String CONFIG_VIEWFS_PREFIX = "fs.viewfs.mounttable"; + + /** + * Prefix for the config variable for the ViewFs mount-table path. 
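A sketch of the two new configuration keys introduced around here, assuming the %s in the overload-scheme pattern takes the target filesystem scheme; the paths and the DistributedFileSystem mapping are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsConstants;

    public class ViewFsOverloadSchemeConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // fs.viewfs.mounttable.path: where the versioned mount-table.<N>.xml
        // files (or a single mount-table file) live.
        conf.set("fs.viewfs.mounttable.path", "hdfs://ns1/config/mount-table-dir");

        // fs.viewfs.overload.scheme.target.<scheme>.impl: the real FileSystem
        // implementation to use for a given target scheme.
        String key = String.format(
            FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, "hdfs");
        conf.set(key, "org.apache.hadoop.hdfs.DistributedFileSystem");
      }
    }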
+ */ + String CONFIG_VIEWFS_MOUNTTABLE_PATH = CONFIG_VIEWFS_PREFIX + ".path"; /** * Prefix for the home dir for the mount table - if not specified diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/FsGetter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/FsGetter.java new file mode 100644 index 0000000000000..071af11e63bf2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/FsGetter.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; + +/** + * File system instance getter. + */ +@Private +class FsGetter { + + /** + * Gets new file system instance of given uri. + */ + public FileSystem getNewInstance(URI uri, Configuration conf) + throws IOException { + return FileSystem.newInstance(uri, conf); + } + + /** + * Gets file system instance of given uri. + */ + public FileSystem get(URI uri, Configuration conf) throws IOException { + return FileSystem.get(uri, conf); + } +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java new file mode 100644 index 0000000000000..3968e3650cf39 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An implementation for Apache Hadoop compatible file system based mount-table + * file loading. + */ +public class HCFSMountTableConfigLoader implements MountTableConfigLoader { + private static final String REGEX_DOT = "[.]"; + private static final Logger LOGGER = + LoggerFactory.getLogger(HCFSMountTableConfigLoader.class); + private Path mountTable = null; + + /** + * Loads the mount-table configuration from hadoop compatible file system and + * add the configuration items to given configuration. Mount-table + * configuration format should be suffixed with version number. + * Format: mount-table..xml + * Example: mount-table.1.xml + * When user wants to update mount-table, the expectation is to upload new + * mount-table configuration file with monotonically increasing integer as + * version number. This API loads the highest version number file. We can + * also configure single file path directly. + * + * @param mountTableConfigPath : A directory path where mount-table files + * stored or a mount-table file path. We recommend to configure + * directory with the mount-table version files. + * @param conf : to add the mount table as resource. + */ + @Override + public void load(String mountTableConfigPath, Configuration conf) + throws IOException { + this.mountTable = new Path(mountTableConfigPath); + String scheme = mountTable.toUri().getScheme(); + FsGetter fsGetter = new ViewFileSystemOverloadScheme.ChildFsGetter(scheme); + try (FileSystem fs = fsGetter.getNewInstance(mountTable.toUri(), conf)) { + RemoteIterator listFiles = + fs.listFiles(mountTable, false); + LocatedFileStatus lfs = null; + int higherVersion = -1; + while (listFiles.hasNext()) { + LocatedFileStatus curLfs = listFiles.next(); + String cur = curLfs.getPath().getName(); + String[] nameParts = cur.split(REGEX_DOT); + if (nameParts.length < 2) { + logInvalidFileNameFormat(cur); + continue; // invalid file name + } + int curVersion = higherVersion; + try { + curVersion = Integer.parseInt(nameParts[nameParts.length - 2]); + } catch (NumberFormatException nfe) { + logInvalidFileNameFormat(cur); + continue; + } + + if (curVersion > higherVersion) { + higherVersion = curVersion; + lfs = curLfs; + } + } + + if (lfs == null) { + // No valid mount table file found. + // TODO: Should we fail? Currently viewfs init will fail if no mount + // links anyway. + LOGGER.warn("No valid mount-table file exist at: {}. At least one " + + "mount-table file should present with the name format: " + + "mount-table..xml", mountTableConfigPath); + return; + } + // Latest version file. + Path latestVersionMountTable = lfs.getPath(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Loading the mount-table {} into configuration.", + latestVersionMountTable); + } + try (FSDataInputStream open = fs.open(latestVersionMountTable)) { + Configuration newConf = new Configuration(false); + newConf.addResource(open); + // This will add configuration props as resource, instead of stream + // itself. So, that stream can be closed now. 
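A short usage sketch of the version-selection behaviour described in the javadoc above; the directory layout, namenode and paths are illustrative only.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.viewfs.HCFSMountTableConfigLoader;
  import org.apache.hadoop.fs.viewfs.MountTableConfigLoader;

  public class MountTableLoaderExample {               // hypothetical example class
    public static void main(String[] args) throws Exception {
      // Assumed directory layout:
      //   hdfs://nn1/config/mount-table.1.xml
      //   hdfs://nn1/config/mount-table.2.xml   <- highest version, the one loaded
      Configuration conf = new Configuration();
      MountTableConfigLoader loader = new HCFSMountTableConfigLoader();
      loader.load("hdfs://nn1/config", conf);
      // conf now carries the fs.viewfs.mounttable.* entries from the
      // highest-versioned file; a direct file path works as well.
    }
  }
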
+ conf.addResource(newConf); + } + } + } + + private void logInvalidFileNameFormat(String cur) { + LOGGER.warn("Invalid file name format for mount-table version file: {}. " + + "The valid file name format is mount-table-name..xml", + cur); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java index 69923438ecc20..50c839b52b654 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java @@ -123,6 +123,7 @@ static class INodeDir extends INode { private final Map> children = new HashMap<>(); private T internalDirFs = null; //filesystem of this internal directory private boolean isRoot = false; + private INodeLink fallbackLink = null; INodeDir(final String pathToNode, final UserGroupInformation aUgi) { super(pathToNode, aUgi); @@ -149,6 +150,17 @@ boolean isRoot() { return isRoot; } + INodeLink getFallbackLink() { + return fallbackLink; + } + + void addFallbackLink(INodeLink link) throws IOException { + if (!isRoot) { + throw new IOException("Fallback link can only be added for root"); + } + this.fallbackLink = link; + } + Map> getChildren() { return Collections.unmodifiableMap(children); } @@ -580,6 +592,7 @@ protected InodeTree(final Configuration config, final String viewName) } } rootFallbackLink = fallbackLink; + getRootDir().addFallbackLink(rootFallbackLink); } if (!gotMountTableEntry) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/MountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/MountTableConfigLoader.java new file mode 100644 index 0000000000000..bc2c3ea93c58c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/MountTableConfigLoader.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; + +/** + * An interface for loading mount-table configuration. This class can have more + * APIs like refreshing mount tables automatically etc. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface MountTableConfigLoader { + + /** + * Loads the mount-table configuration into given configuration. + * + * @param mountTableConfigPath - Path of the mount table. 
It can be a file or + * a directory in the case of multiple versions of mount-table + * files(Recommended option). + * @param conf - Configuration object to add mount table. + */ + void load(String mountTableConfigPath, Configuration conf) + throws IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java index 53966b8afbfcc..85af68af31434 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java @@ -18,8 +18,8 @@ package org.apache.hadoop.fs.viewfs; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -59,7 +59,7 @@ */ @Private final class NflyFSystem extends FileSystem { - private static final Log LOG = LogFactory.getLog(NflyFSystem.class); + private static final Logger LOG = LoggerFactory.getLogger(NflyFSystem.class); private static final String NFLY_TMP_PREFIX = "_nfly_tmp_"; enum NflyKey { @@ -212,6 +212,21 @@ private static String getRack(String rackString) { */ private NflyFSystem(URI[] uris, Configuration conf, int minReplication, EnumSet nflyFlags) throws IOException { + this(uris, conf, minReplication, nflyFlags, null); + } + + /** + * Creates a new Nfly instance. + * + * @param uris the list of uris in the mount point + * @param conf configuration object + * @param minReplication minimum copies to commit a write op + * @param nflyFlags modes such readMostRecent + * @param fsGetter to get the file system instance with the given uri + * @throws IOException + */ + private NflyFSystem(URI[] uris, Configuration conf, int minReplication, + EnumSet nflyFlags, FsGetter fsGetter) throws IOException { if (uris.length < minReplication) { throw new IOException(minReplication + " < " + uris.length + ": Minimum replication < #destinations"); @@ -238,8 +253,14 @@ private NflyFSystem(URI[] uris, Configuration conf, int minReplication, nodes = new NflyNode[uris.length]; final Iterator rackIter = rackStrings.iterator(); for (int i = 0; i < nodes.length; i++) { - nodes[i] = new NflyNode(hostStrings.get(i), rackIter.next(), uris[i], - conf); + if (fsGetter != null) { + nodes[i] = new NflyNode(hostStrings.get(i), rackIter.next(), + new ChRootedFileSystem(fsGetter.getNewInstance(uris[i], conf), + uris[i])); + } else { + nodes[i] = + new NflyNode(hostStrings.get(i), rackIter.next(), uris[i], conf); + } } // sort all the uri's by distance from myNode, the local file system will // automatically be the the first one. 
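An end-to-end sketch of an nfly mount, summarizing the semantics from the constructor comments above (writes are committed only when at least minReplication targets accept them, reads prefer the closest target). Cluster and namenode names are hypothetical; this illustrates intent, not a tested deployment.

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.viewfs.ConfigUtil;

  public class NflyMountExample {                      // hypothetical example class
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Two targets and minReplication=2, so every write must land on both.
      ConfigUtil.addLinkNfly(conf, "ClusterY", "/tmpdir",
          "minReplication=2,readMostRecent=true",
          "hdfs://nn1/tmpdir,hdfs://nn2/tmpdir");
      FileSystem viewFs = FileSystem.get(URI.create("viewfs://ClusterY/"), conf);
      // Data is staged under the _nfly_tmp_ prefix on each target and the write
      // succeeds once minReplication targets commit; reads go to the nearest
      // target, with readMostRecent biasing toward the newest copy.
      viewFs.create(new Path("/tmpdir/part-00000")).close();
    }
  }
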
@@ -921,7 +942,7 @@ private static void processThrowable(NflyNode nflyNode, String op, * @throws IOException */ static FileSystem createFileSystem(URI[] uris, Configuration conf, - String settings) throws IOException { + String settings, FsGetter fsGetter) throws IOException { // assert settings != null int minRepl = DEFAULT_MIN_REPLICATION; EnumSet nflyFlags = EnumSet.noneOf(NflyKey.class); @@ -946,6 +967,6 @@ static FileSystem createFileSystem(URI[] uris, Configuration conf, throw new IllegalArgumentException(nflyKey + ": Infeasible"); } } - return new NflyFSystem(uris, conf, minRepl, nflyFlags); + return new NflyFSystem(uris, conf, minRepl, nflyFlags, fsGetter); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index faa374a39789b..4f02feeebec8b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -18,9 +18,9 @@ package org.apache.hadoop.fs.viewfs; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; -import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT; +import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import java.io.FileNotFoundException; import java.io.IOException; @@ -35,9 +35,9 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; import java.util.Set; -import java.util.Map.Entry; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -96,16 +96,28 @@ static AccessControlException readOnlyMountTable(final String operation, return readOnlyMountTable(operation, p.toString()); } + /** + * Gets file system creator instance. + */ + protected FsGetter fsGetter() { + return new FsGetter(); + } + /** * Caching children filesystems. HADOOP-15565. */ static class InnerCache { private Map map = new HashMap<>(); + private FsGetter fsCreator; + + InnerCache(FsGetter fsCreator) { + this.fsCreator = fsCreator; + } FileSystem get(URI uri, Configuration config) throws IOException { Key key = new Key(uri); if (map.get(key) == null) { - FileSystem fs = FileSystem.newInstance(uri, config); + FileSystem fs = fsCreator.getNewInstance(uri, config); map.put(key, fs); return fs; } else { @@ -193,7 +205,7 @@ public URI[] getTargetFileSystemURIs() { final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable - URI myUri; + private URI myUri; private Path workingDir; Configuration config; InodeTree fsState; // the fs state; ie the mount table @@ -255,13 +267,13 @@ public void initialize(final URI theUri, final Configuration conf) config = conf; enableInnerCache = config.getBoolean(CONFIG_VIEWFS_ENABLE_INNER_CACHE, CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT); - final InnerCache innerCache = new InnerCache(); + FsGetter fsGetter = fsGetter(); + final InnerCache innerCache = new InnerCache(fsGetter); // Now build client side view (i.e. client side mount table) from config. 
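The new protected fsGetter() hook above is the same seam ViewFileSystemOverloadScheme overrides later in this patch. A hedged sketch of a subclass plugging in its own getter; TracingViewFileSystem is hypothetical and sits in org.apache.hadoop.fs.viewfs because the FsGetter return type is package-private.

  package org.apache.hadoop.fs.viewfs;

  import java.io.IOException;
  import java.net.URI;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  /** Hypothetical ViewFileSystem that logs every child file-system creation. */
  public class TracingViewFileSystem extends ViewFileSystem {
    private static final Logger TRACE_LOG =
        LoggerFactory.getLogger(TracingViewFileSystem.class);

    public TracingViewFileSystem() throws IOException {
      super();
    }

    @Override
    protected FsGetter fsGetter() {
      return new FsGetter() {
        @Override
        public FileSystem getNewInstance(URI uri, Configuration conf)
            throws IOException {
          TRACE_LOG.debug("Creating child file system for {}", uri);
          return super.getNewInstance(uri, conf);
        }
      };
    }
  }
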
final String authority = theUri.getAuthority(); try { - myUri = new URI(FsConstants.VIEWFS_SCHEME, authority, "/", null, null); + myUri = new URI(getScheme(), authority, "/", null, null); fsState = new InodeTree(conf, authority) { - @Override protected FileSystem getTargetFileSystem(final URI uri) throws URISyntaxException, IOException { @@ -269,7 +281,7 @@ protected FileSystem getTargetFileSystem(final URI uri) if (enableInnerCache) { fs = innerCache.get(uri, config); } else { - fs = FileSystem.get(uri, config); + fs = fsGetter.get(uri, config); } return new ChRootedFileSystem(fs, uri); } @@ -283,7 +295,8 @@ protected FileSystem getTargetFileSystem(final INodeDir dir) @Override protected FileSystem getTargetFileSystem(final String settings, final URI[] uris) throws URISyntaxException, IOException { - return NflyFSystem.createFileSystem(uris, config, settings); + return NflyFSystem.createFileSystem(uris, config, settings, + fsGetter); } }; workingDir = this.getHomeDirectory(); @@ -1167,10 +1180,19 @@ public FileStatus getFileStatus(Path f) throws IOException { } + /** + * {@inheritDoc} + * + * Note: listStatus on root("/") considers listing from fallbackLink if + * available. If the same directory name is present in configured mount + * path as well as in fallback link, then only the configured mount path + * will be listed in the returned result. + */ @Override public FileStatus[] listStatus(Path f) throws AccessControlException, FileNotFoundException, IOException { checkPathIsSlash(f); + FileStatus[] fallbackStatuses = listStatusForFallbackLink(); FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()]; int i = 0; for (Entry> iEntry : @@ -1193,7 +1215,45 @@ public FileStatus[] listStatus(Path f) throws AccessControlException, myUri, null)); } } - return result; + if (fallbackStatuses.length > 0) { + return consolidateFileStatuses(fallbackStatuses, result); + } else { + return result; + } + } + + private FileStatus[] consolidateFileStatuses(FileStatus[] fallbackStatuses, + FileStatus[] mountPointStatuses) { + ArrayList result = new ArrayList<>(); + Set pathSet = new HashSet<>(); + for (FileStatus status : mountPointStatuses) { + result.add(status); + pathSet.add(status.getPath().getName()); + } + for (FileStatus status : fallbackStatuses) { + if (!pathSet.contains(status.getPath().getName())) { + result.add(status); + } + } + return result.toArray(new FileStatus[0]); + } + + private FileStatus[] listStatusForFallbackLink() throws IOException { + if (theInternalDir.isRoot() && + theInternalDir.getFallbackLink() != null) { + FileSystem linkedFs = + theInternalDir.getFallbackLink().getTargetFileSystem(); + // Fallback link is only applicable for root + FileStatus[] statuses = linkedFs.listStatus(new Path("/")); + for (FileStatus status : statuses) { + // Fix the path back to viewfs scheme + status.setPath( + new Path(myUri.toString(), status.getPath().getName())); + } + return statuses; + } else { + return new FileStatus[0]; + } } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java new file mode 100644 index 0000000000000..36f9cd104cb6b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java @@ -0,0 +1,259 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
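A usage sketch of the root-listing behaviour documented above: with a fallback link configured, listStatus("/") is the union of mount points and fallback children, and a mount point wins when both define the same name. The mount-table name and targets are illustrative, and the sketch assumes the existing ConfigUtil.addLinkFallback helper.

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.viewfs.ConfigUtil;

  public class RootListingExample {                    // hypothetical example class
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      ConfigUtil.addLink(conf, "ClusterZ", "/data", URI.create("hdfs://nn1/data"));
      ConfigUtil.addLinkFallback(conf, "ClusterZ",
          URI.create("hdfs://nn1/fallbackRoot"));

      FileSystem viewFs = FileSystem.get(URI.create("viewfs://ClusterZ/"), conf);
      for (FileStatus st : viewFs.listStatus(new Path("/"))) {
        // "/data" comes from the mount table and wins over any same-named child
        // of hdfs://nn1/fallbackRoot; other fallback children appear with their
        // paths rewritten back to the viewfs:// scheme.
        System.out.println(st.getPath());
      }
    }
  }
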
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.net.URI; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UnsupportedFileSystemException; + +/****************************************************************************** + * This class is extended from the ViewFileSystem for the overloaded scheme + * file system. Mount link configurations and in-memory mount table + * building behaviors are inherited from ViewFileSystem. Unlike ViewFileSystem + * scheme (viewfs://), the users would be able to use any scheme. + * + * To use this class, the following configurations need to be added in + * core-site.xml file. + * 1) fs..impl + * = org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme + * 2) fs.viewfs.overload.scheme.target..impl + * = " + * + * Here can be any scheme, but with that scheme there should be a + * hadoop compatible file system available. Second configuration value should + * be the respective scheme's file system implementation class. + * Example: if scheme is configured with "hdfs", then the 2nd configuration + * class name will be org.apache.hadoop.hdfs.DistributedFileSystem. + * if scheme is configured with "s3a", then the 2nd configuration class name + * will be org.apache.hadoop.fs.s3a.S3AFileSystem. 
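A sketch of the two configuration knobs the class comment above calls for, wired programmatically for the "hdfs" scheme of Use Case 1. Cluster and NN names are the illustrative ones from the comment, and the target-impl key is assumed to match FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN.

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.viewfs.ConfigUtil;

  public class OverloadSchemeExample {                 // hypothetical example class
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // 1) Route the "hdfs" scheme through the overload-scheme view.
      conf.set("fs.hdfs.impl",
          "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
      // 2) Name the real implementation that backs "hdfs" targets.
      conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
          "org.apache.hadoop.hdfs.DistributedFileSystem");
      // Mount link from Use Case 1.
      ConfigUtil.addLink(conf, "Cluster", "/user", URI.create("hdfs://NN1/user"));

      FileSystem fs = FileSystem.get(URI.create("hdfs://Cluster/"), conf);
      // Resolves through the mount table and lands on hdfs://NN1/user/fileA.
      fs.create(new Path("/user/fileA")).close();
    }
  }
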
+ * + * Use Case 1: + * =========== + * If users want some of their existing cluster (hdfs://Cluster) + * data to mount with other hdfs and object store clusters(hdfs://NN1, + * o3fs://bucket1.volume1/, s3a://bucket1/) + * + * fs.viewfs.mounttable.Cluster./user = hdfs://NN1/user + * fs.viewfs.mounttable.Cluster./data = o3fs://bucket1.volume1/data + * fs.viewfs.mounttable.Cluster./backup = s3a://bucket1/backup/ + * + * Op1: Create file hdfs://Cluster/user/fileA will go to hdfs://NN1/user/fileA + * Op2: Create file hdfs://Cluster/data/datafile will go to + * o3fs://bucket1.volume1/data/datafile + * Op3: Create file hdfs://Cluster/backup/data.zip will go to + * s3a://bucket1/backup/data.zip + * + * Use Case 2: + * =========== + * If users want some of their existing cluster (s3a://bucketA/) + * data to mount with other hdfs and object store clusters + * (hdfs://NN1, o3fs://bucket1.volume1/) + * + * fs.viewfs.mounttable.bucketA./user = hdfs://NN1/user + * fs.viewfs.mounttable.bucketA./data = o3fs://bucket1.volume1/data + * fs.viewfs.mounttable.bucketA./salesDB = s3a://bucketA/salesDB/ + * + * Op1: Create file s3a://bucketA/user/fileA will go to hdfs://NN1/user/fileA + * Op2: Create file s3a://bucketA/data/datafile will go to + * o3fs://bucket1.volume1/data/datafile + * Op3: Create file s3a://bucketA/salesDB/dbfile will go to + * s3a://bucketA/salesDB/dbfile + *****************************************************************************/ +@InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" }) +@InterfaceStability.Evolving +public class ViewFileSystemOverloadScheme extends ViewFileSystem { + private URI myUri; + public ViewFileSystemOverloadScheme() throws IOException { + super(); + } + + @Override + public String getScheme() { + return myUri.getScheme(); + } + + @Override + public void initialize(URI theUri, Configuration conf) throws IOException { + this.myUri = theUri; + if (LOG.isDebugEnabled()) { + LOG.debug("Initializing the ViewFileSystemOverloadScheme with the uri: " + + theUri); + } + String mountTableConfigPath = + conf.get(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH); + if (null != mountTableConfigPath) { + MountTableConfigLoader loader = new HCFSMountTableConfigLoader(); + loader.load(mountTableConfigPath, conf); + } else { + // TODO: Should we fail here.? + if (LOG.isDebugEnabled()) { + LOG.debug( + "Missing configuration for fs.viewfs.mounttable.path. Proceeding" + + "with core-site.xml mount-table information if avaialable."); + } + } + super.initialize(theUri, conf); + } + + /** + * This method is overridden because in ViewFileSystemOverloadScheme if + * overloaded scheme matches with mounted target fs scheme, file system + * should be created without going into fs..impl based resolution. + * Otherwise it will end up in an infinite loop as the target will be + * resolved again to ViewFileSystemOverloadScheme as fs..impl points + * to ViewFileSystemOverloadScheme. So, below method will initialize the + * fs.viewfs.overload.scheme.target..impl. Other schemes can + * follow fs.newInstance + */ + @Override + protected FsGetter fsGetter() { + return new ChildFsGetter(getScheme()); + } + + /** + * This class checks whether the rooScheme is same as URI scheme. If both are + * same, then it will initialize file systems by using the configured + * fs.viewfs.overload.scheme.target..impl class. 
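To complement the initialize() logic above, a sketch of keeping the mount-table definition itself in HDFS instead of core-site.xml; the path and cluster names are illustrative, and the overload-scheme wiring from the previous sketch is assumed to already be in the loaded configuration.

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.viewfs.Constants;

  public class RemoteMountTableExample {               // hypothetical example class
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Directory holding mount-table.<version>.xml files; the
      // HCFSMountTableConfigLoader described earlier picks the highest version.
      conf.set(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH,
          "hdfs://NN1/mount-table-dir");
      FileSystem fs = FileSystem.get(URI.create("hdfs://Cluster/"), conf);
      // fs now resolves paths through the remotely loaded mount table.
    }
  }
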
+ */ + static class ChildFsGetter extends FsGetter { + + private final String rootScheme; + + ChildFsGetter(String rootScheme) { + this.rootScheme = rootScheme; + } + + @Override + public FileSystem getNewInstance(URI uri, Configuration conf) + throws IOException { + if (uri.getScheme().equals(this.rootScheme)) { + if (LOG.isDebugEnabled()) { + LOG.debug( + "The file system initialized uri scheme is matching with the " + + "given target uri scheme. The target uri is: " + uri); + } + /* + * Avoid looping when target fs scheme is matching to overloaded scheme. + */ + return createFileSystem(uri, conf); + } else { + return FileSystem.newInstance(uri, conf); + } + } + + /** + * When ViewFileSystemOverloadScheme scheme and target uri scheme are + * matching, it will not take advantage of FileSystem cache as it will + * create instance directly. For caching needs please set + * "fs.viewfs.enable.inner.cache" to true. + */ + @Override + public FileSystem get(URI uri, Configuration conf) throws IOException { + if (uri.getScheme().equals(this.rootScheme)) { + // Avoid looping when target fs scheme is matching to overloaded + // scheme. + if (LOG.isDebugEnabled()) { + LOG.debug( + "The file system initialized uri scheme is matching with the " + + "given target uri scheme. So, the target file system " + + "instances will not be cached. To cache fs instances, " + + "please set fs.viewfs.enable.inner.cache to true. " + + "The target uri is: " + uri); + } + return createFileSystem(uri, conf); + } else { + return FileSystem.get(uri, conf); + } + } + + private FileSystem createFileSystem(URI uri, Configuration conf) + throws IOException { + final String fsImplConf = String.format( + FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + uri.getScheme()); + Class clazz = conf.getClass(fsImplConf, null); + if (clazz == null) { + throw new UnsupportedFileSystemException( + String.format("%s=null: %s: %s", fsImplConf, + "No overload scheme fs configured", uri.getScheme())); + } + FileSystem fs = (FileSystem) newInstance(clazz, uri, conf); + fs.initialize(uri, conf); + return fs; + } + + private T newInstance(Class theClass, URI uri, Configuration conf) { + T result; + try { + Constructor meth = theClass.getConstructor(); + meth.setAccessible(true); + result = meth.newInstance(); + } catch (InvocationTargetException e) { + Throwable cause = e.getCause(); + if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else { + throw new RuntimeException(cause); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + return result; + } + + } + + /** + * This is an admin only API to give access to its child raw file system, if + * the path is link. If the given path is an internal directory(path is from + * mount paths tree), it will initialize the file system of given path uri + * directly. If path cannot be resolved to any internal directory or link, it + * will throw NotInMountpointException. Please note, this API will not return + * chrooted file system. Instead, this API will get actual raw file system + * instances. + * + * @param path - fs uri path + * @param conf - configuration + * @throws IOException + */ + public FileSystem getRawFileSystem(Path path, Configuration conf) + throws IOException { + InodeTree.ResolveResult res; + try { + res = fsState.resolve(getUriPath(path), true); + return res.isInternalDir() ? 
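A brief usage sketch for the getRawFileSystem() contract described above, as admin tooling might call it; "Cluster" and "/user" are illustrative, and the configuration is assumed to already carry the overload-scheme settings.

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme;

  public class RawFileSystemExample {                  // hypothetical admin tool
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();        // overload-scheme settings assumed present
      ViewFileSystemOverloadScheme vfs = (ViewFileSystemOverloadScheme)
          FileSystem.get(URI.create("hdfs://Cluster/"), conf);
      // Resolve the unchrooted file system behind the /user mount link; an
      // unresolvable path raises NotInMountpointException instead.
      FileSystem raw = vfs.getRawFileSystem(new Path("hdfs://Cluster/user"), conf);
      System.out.println(raw.getUri());                // e.g. hdfs://NN1
    }
  }
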
fsGetter().get(path.toUri(), conf) + : ((ChRootedFileSystem) res.targetFileSystem).getMyFs(); + } catch (FileNotFoundException e) { + // No link configured with passed path. + throw new NotInMountpointException(path, + "No link found for the given path."); + } + } + +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index 2c8c1a538e433..607bdb8d423a0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -25,10 +25,12 @@ import java.net.URISyntaxException; import java.util.ArrayList; import java.util.EnumSet; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -950,10 +952,19 @@ public int getUriDefaultPort() { return -1; } + /** + * {@inheritDoc} + * + * Note: listStatus on root("/") considers listing from fallbackLink if + * available. If the same directory name is present in configured mount + * path as well as in fallback link, then only the configured mount path + * will be listed in the returned result. + */ @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, IOException { checkPathIsSlash(f); + FileStatus[] fallbackStatuses = listStatusForFallbackLink(); FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()]; int i = 0; for (Entry> iEntry : @@ -979,7 +990,45 @@ public FileStatus[] listStatus(final Path f) throws AccessControlException, myUri, null)); } } - return result; + if (fallbackStatuses.length > 0) { + return consolidateFileStatuses(fallbackStatuses, result); + } else { + return result; + } + } + + private FileStatus[] consolidateFileStatuses(FileStatus[] fallbackStatuses, + FileStatus[] mountPointStatuses) { + ArrayList result = new ArrayList<>(); + Set pathSet = new HashSet<>(); + for (FileStatus status : mountPointStatuses) { + result.add(status); + pathSet.add(status.getPath().getName()); + } + for (FileStatus status : fallbackStatuses) { + if (!pathSet.contains(status.getPath().getName())) { + result.add(status); + } + } + return result.toArray(new FileStatus[0]); + } + + private FileStatus[] listStatusForFallbackLink() throws IOException { + if (theInternalDir.isRoot() && + theInternalDir.getFallbackLink() != null) { + AbstractFileSystem linkedFs = + theInternalDir.getFallbackLink().getTargetFileSystem(); + // Fallback link is only applicable for root + FileStatus[] statuses = linkedFs.listStatus(new Path("/")); + for (FileStatus status : statuses) { + // Fix the path back to viewfs scheme + status.setPath( + new Path(myUri.toString(), status.getPath().getName())); + } + return statuses; + } else { + return new FileStatus[0]; + } } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/package-info.java new file mode 100644 index 0000000000000..89986d0e5ef69 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/package-info.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * ViewFileSystem and ViewFileSystemOverloadScheme classes. + */ +@InterfaceAudience.LimitedPrivate({"MapReduce", "HBase", "Hive" }) +@InterfaceStability.Stable +package org.apache.hadoop.fs.viewfs; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java index 12de2ef91c413..828a17bcb972e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java @@ -579,6 +579,11 @@ public synchronized void processResult(int rc, String path, Object ctx, fatalError(errorMessage); } + @VisibleForTesting + public boolean getWantToBeInElection() { + return wantToBeInElection; + } + /** * We failed to become active. Re-join the election, but * sleep for a few seconds after terminating our existing diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java index 0693dce4281ec..0950ea7e01c57 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java @@ -39,7 +39,6 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,16 +51,15 @@ @InterfaceAudience.Private public abstract class HAAdmin extends Configured implements Tool { - - private static final String FORCEFENCE = "forcefence"; - private static final String FORCEACTIVE = "forceactive"; - + + protected static final String FORCEACTIVE = "forceactive"; + /** * Undocumented flag which allows an administrator to use manual failover * state transitions even when auto-failover is enabled. This is an unsafe * operation, which is why it is not documented in the usage below. 
*/ - private static final String FORCEMANUAL = "forcemanual"; + protected static final String FORCEMANUAL = "forcemanual"; private static final Logger LOG = LoggerFactory.getLogger(HAAdmin.class); private int rpcTimeoutForChecks = -1; @@ -72,15 +70,6 @@ public abstract class HAAdmin extends Configured implements Tool { new UsageInfo("[--"+FORCEACTIVE+"] ", "Transitions the service into Active state")) .put("-transitionToStandby", new UsageInfo("", "Transitions the service into Standby state")) - .put("-transitionToObserver", - new UsageInfo("", - "Transitions the service into Observer state")) - .put("-failover", - new UsageInfo("[--"+FORCEFENCE+"] [--"+FORCEACTIVE+"] ", - "Failover from the first service to the second.\n" + - "Unconditionally fence services if the --"+FORCEFENCE+" option is used.\n" + - "Try to failover to the target service even if it is not ready if the " + - "--" + FORCEACTIVE + " option is used.")) .put("-getServiceState", new UsageInfo("", "Returns the state of the service")) .put("-getAllServiceState", @@ -99,6 +88,14 @@ public abstract class HAAdmin extends Configured implements Tool { protected PrintStream out = System.out; private RequestSource requestSource = RequestSource.REQUEST_BY_USER; + protected RequestSource getRequestSource() { + return requestSource; + } + + protected void setRequestSource(RequestSource requestSource) { + this.requestSource = requestSource; + } + protected HAAdmin() { super(); } @@ -118,34 +115,44 @@ protected String getUsageString() { return "Usage: HAAdmin"; } - protected void printUsage(PrintStream errOut) { - errOut.println(getUsageString()); - for (Map.Entry e : USAGE.entrySet()) { + protected void printUsage(PrintStream pStr, + Map helpEntries) { + pStr.println(getUsageString()); + for (Map.Entry e : helpEntries.entrySet()) { String cmd = e.getKey(); UsageInfo usage = e.getValue(); - + if (usage.args == null) { - errOut.println(" [" + cmd + "]"); + pStr.println(" [" + cmd + "]"); } else { - errOut.println(" [" + cmd + " " + usage.args + "]"); + pStr.println(" [" + cmd + " " + usage.args + "]"); } } - errOut.println(); - ToolRunner.printGenericCommandUsage(errOut); + pStr.println(); + ToolRunner.printGenericCommandUsage(pStr); } - - private void printUsage(PrintStream errOut, String cmd) { - UsageInfo usage = USAGE.get(cmd); + + protected void printUsage(PrintStream pStr) { + printUsage(pStr, USAGE); + } + + protected void printUsage(PrintStream pStr, String cmd, + Map helpEntries) { + UsageInfo usage = helpEntries.get(cmd); if (usage == null) { throw new RuntimeException("No usage for cmd " + cmd); } if (usage.args == null) { - errOut.println(getUsageString() + " [" + cmd + "]"); + pStr.println(getUsageString() + " [" + cmd + "]"); } else { - errOut.println(getUsageString() + " [" + cmd + " " + usage.args + "]"); + pStr.println(getUsageString() + " [" + cmd + " " + usage.args + "]"); } } + protected void printUsage(PrintStream pStr, String cmd) { + printUsage(pStr, cmd, USAGE); + } + private int transitionToActive(final CommandLine cmd) throws IOException, ServiceFailedException { String[] argv = cmd.getArgs(); @@ -225,27 +232,6 @@ private int transitionToStandby(final CommandLine cmd) return 0; } - private int transitionToObserver(final CommandLine cmd) - throws IOException, ServiceFailedException { - String[] argv = cmd.getArgs(); - if (argv.length != 1) { - errOut.println("transitionToObserver: incorrect number of arguments"); - printUsage(errOut, "-transitionToObserver"); - return -1; - } - - HAServiceTarget target = 
resolveTarget(argv[0]); - if (!checkSupportObserver(target)) { - return -1; - } - if (!checkManualStateManagementOK(target)) { - return -1; - } - HAServiceProtocol proto = target.getProxy(getConf(), 0); - HAServiceProtocolHelper.transitionToObserver(proto, createReqInfo()); - return 0; - } - /** * Ensure that we are allowed to manually manage the HA state of the target * service. If automatic failover is configured, then the automatic @@ -255,7 +241,7 @@ private int transitionToObserver(final CommandLine cmd) * @param target the target to check * @return true if manual state management is allowed */ - private boolean checkManualStateManagementOK(HAServiceTarget target) { + protected boolean checkManualStateManagementOK(HAServiceTarget target) { if (target.isAutoFailoverEnabled()) { if (requestSource != RequestSource.REQUEST_BY_USER_FORCED) { errOut.println( @@ -274,93 +260,19 @@ private boolean checkManualStateManagementOK(HAServiceTarget target) { return true; } - /** - * Check if the target supports the Observer state. - * @param target the target to check - * @return true if the target support Observer state, false otherwise. - */ - private boolean checkSupportObserver(HAServiceTarget target) { - if (target.supportObserver()) { - return true; - } else { - errOut.println( - "The target " + target + " doesn't support Observer state."); - return false; - } - } - - private StateChangeRequestInfo createReqInfo() { + protected StateChangeRequestInfo createReqInfo() { return new StateChangeRequestInfo(requestSource); } - private int failover(CommandLine cmd) - throws IOException, ServiceFailedException { - boolean forceFence = cmd.hasOption(FORCEFENCE); - boolean forceActive = cmd.hasOption(FORCEACTIVE); - - int numOpts = cmd.getOptions() == null ? 0 : cmd.getOptions().length; - final String[] args = cmd.getArgs(); - - if (numOpts > 3 || args.length != 2) { - errOut.println("failover: incorrect arguments"); - printUsage(errOut, "-failover"); - return -1; - } - - HAServiceTarget fromNode = resolveTarget(args[0]); - HAServiceTarget toNode = resolveTarget(args[1]); - - // Check that auto-failover is consistently configured for both nodes. - Preconditions.checkState( - fromNode.isAutoFailoverEnabled() == - toNode.isAutoFailoverEnabled(), - "Inconsistent auto-failover configs between %s and %s!", - fromNode, toNode); - - if (fromNode.isAutoFailoverEnabled()) { - if (forceFence || forceActive) { - // -forceActive doesn't make sense with auto-HA, since, if the node - // is not healthy, then its ZKFC will immediately quit the election - // again the next time a health check runs. - // - // -forceFence doesn't seem to have any real use cases with auto-HA - // so it isn't implemented. - errOut.println(FORCEFENCE + " and " + FORCEACTIVE + " flags not " + - "supported with auto-failover enabled."); - return -1; - } - try { - return gracefulFailoverThroughZKFCs(toNode); - } catch (UnsupportedOperationException e){ - errOut.println("Failover command is not supported with " + - "auto-failover enabled: " + e.getLocalizedMessage()); - return -1; - } - } - - FailoverController fc = new FailoverController(getConf(), - requestSource); - - try { - fc.failover(fromNode, toNode, forceFence, forceActive); - out.println("Failover from "+args[0]+" to "+args[1]+" successful"); - } catch (FailoverFailedException ffe) { - errOut.println("Failover failed: " + ffe.getLocalizedMessage()); - return -1; - } - return 0; - } - - /** * Initiate a graceful failover by talking to the target node's ZKFC. 
* This sends an RPC to the ZKFC, which coordinates the failover. - * + * * @param toNode the node to fail to * @return status code (0 for success) * @throws IOException if failover does not succeed */ - private int gracefulFailoverThroughZKFCs(HAServiceTarget toNode) + protected int gracefulFailoverThroughZKFCs(HAServiceTarget toNode) throws IOException { int timeout = FailoverController.getRpcTimeoutToNewActive(getConf()); @@ -443,45 +355,52 @@ public int run(String[] argv) throws Exception { return -1; } } - - protected int runCmd(String[] argv) throws Exception { + + protected boolean checkParameterValidity(String[] argv, + Map helpEntries){ + if (argv.length < 1) { - printUsage(errOut); - return -1; + printUsage(errOut, helpEntries); + return false; } String cmd = argv[0]; - if (!cmd.startsWith("-")) { - errOut.println("Bad command '" + cmd + "': expected command starting with '-'"); - printUsage(errOut); - return -1; + errOut.println("Bad command '" + cmd + + "': expected command starting with '-'"); + printUsage(errOut, helpEntries); + return false; } - - if (!USAGE.containsKey(cmd)) { + + if (!helpEntries.containsKey(cmd)) { errOut.println(cmd.substring(1) + ": Unknown command"); - printUsage(errOut); + printUsage(errOut, helpEntries); + return false; + } + return true; + } + + protected boolean checkParameterValidity(String[] argv){ + return checkParameterValidity(argv, USAGE); + } + + protected int runCmd(String[] argv) throws Exception { + if (!checkParameterValidity(argv, USAGE)){ return -1; } - - Options opts = new Options(); + String cmd = argv[0]; + Options opts = new Options(); // Add command-specific options - if ("-failover".equals(cmd)) { - addFailoverCliOpts(opts); - } if("-transitionToActive".equals(cmd)) { addTransitionToActiveCliOpts(opts); } // Mutative commands take FORCEMANUAL option if ("-transitionToActive".equals(cmd) || - "-transitionToStandby".equals(cmd) || - "-transitionToObserver".equals(cmd) || - "-failover".equals(cmd)) { + "-transitionToStandby".equals(cmd)) { opts.addOption(FORCEMANUAL, false, "force manual control even if auto-failover is enabled"); } - CommandLine cmdLine = parseOpts(cmd, opts, argv); if (cmdLine == null) { // error already printed @@ -502,10 +421,6 @@ protected int runCmd(String[] argv) throws Exception { return transitionToActive(cmdLine); } else if ("-transitionToStandby".equals(cmd)) { return transitionToStandby(cmdLine); - } else if ("-transitionToObserver".equals(cmd)) { - return transitionToObserver(cmdLine); - } else if ("-failover".equals(cmd)) { - return failover(cmdLine); } else if ("-getServiceState".equals(cmd)) { return getServiceState(cmdLine); } else if ("-getAllServiceState".equals(cmd)) { @@ -544,7 +459,7 @@ protected int getAllServiceState() { return 0; } - private boolean confirmForceManual() throws IOException { + protected boolean confirmForceManual() throws IOException { return ToolRunner.confirmPrompt( "You have specified the --" + FORCEMANUAL + " flag. This flag is " + "dangerous, as it can induce a split-brain scenario that WILL " + @@ -559,16 +474,7 @@ private boolean confirmForceManual() throws IOException { "Are you sure you want to continue?"); } - /** - * Add CLI options which are specific to the failover command and no - * others. 
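The point of promoting these helpers to protected is that subclasses can carry their own command tables (for example, commands like -failover that were removed from the base class above). A hedged sketch of such a subclass; FancyHAAdmin and -myExtraCommand are hypothetical, and HAAdmin.UsageInfo is assumed to remain visible to subclasses.

  import java.util.Map;

  import com.google.common.collect.ImmutableMap;
  import org.apache.hadoop.ha.HAAdmin;
  import org.apache.hadoop.ha.HAServiceTarget;

  public class FancyHAAdmin extends HAAdmin {

    private static final Map<String, HAAdmin.UsageInfo> MY_USAGE =
        ImmutableMap.<String, HAAdmin.UsageInfo>builder()
            .put("-transitionToActive",
                new HAAdmin.UsageInfo("[--" + FORCEACTIVE + "] <serviceId>",
                    "Transitions the service into Active state"))
            .put("-myExtraCommand",
                new HAAdmin.UsageInfo("<serviceId>", "Hypothetical extra command"))
            .build();

    @Override
    protected HAServiceTarget resolveTarget(String serviceId) {
      throw new UnsupportedOperationException("sketch only");
    }

    @Override
    protected int runCmd(String[] argv) throws Exception {
      // Validate against the subclass table instead of the base USAGE map.
      if (!checkParameterValidity(argv, MY_USAGE)) {
        return -1;
      }
      if ("-myExtraCommand".equals(argv[0])) {
        printUsage(out, "-myExtraCommand", MY_USAGE);  // subclass-specific handling
        return 0;
      }
      return super.runCmd(argv);                       // base commands unchanged
    }
  }
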
- */ - private void addFailoverCliOpts(Options failoverOpts) { - failoverOpts.addOption(FORCEFENCE, false, "force fencing"); - failoverOpts.addOption(FORCEACTIVE, false, "force failover"); - // Don't add FORCEMANUAL, since that's added separately for all commands - // that change state. - } + /** * Add CLI options which are specific to the transitionToActive command and @@ -577,39 +483,47 @@ private void addFailoverCliOpts(Options failoverOpts) { private void addTransitionToActiveCliOpts(Options transitionToActiveCliOpts) { transitionToActiveCliOpts.addOption(FORCEACTIVE, false, "force active"); } - - private CommandLine parseOpts(String cmdName, Options opts, String[] argv) { + + protected CommandLine parseOpts(String cmdName, Options opts, String[] argv, + Map helpEntries) { try { // Strip off the first arg, since that's just the command name - argv = Arrays.copyOfRange(argv, 1, argv.length); + argv = Arrays.copyOfRange(argv, 1, argv.length); return new GnuParser().parse(opts, argv); } catch (ParseException pe) { errOut.println(cmdName.substring(1) + ": incorrect arguments"); - printUsage(errOut, cmdName); + printUsage(errOut, cmdName, helpEntries); return null; } } - private int help(String[] argv) { + protected CommandLine parseOpts(String cmdName, Options opts, String[] argv) { + return parseOpts(cmdName, opts, argv, USAGE); + } + protected int help(String[] argv) { + return help(argv, USAGE); + } + + protected int help(String[] argv, Map helpEntries) { if (argv.length == 1) { // only -help - printUsage(out); + printUsage(out, helpEntries); return 0; } else if (argv.length != 2) { - printUsage(errOut, "-help"); + printUsage(errOut, "-help", helpEntries); return -1; } String cmd = argv[1]; if (!cmd.startsWith("-")) { cmd = "-" + cmd; } - UsageInfo usageInfo = USAGE.get(cmd); + UsageInfo usageInfo = helpEntries.get(cmd); if (usageInfo == null) { errOut.println(cmd + ": Unknown command"); - printUsage(errOut); + printUsage(errOut, helpEntries); return -1; } - + if (usageInfo.args == null) { out.println(cmd + ": " + usageInfo.help); } else { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java index 7ea5188ad8338..61ea53c420ab1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -33,7 +34,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.authorize.PolicyProvider; -import com.google.protobuf.BlockingService; +import org.apache.hadoop.thirdparty.protobuf.BlockingService; @InterfaceAudience.LimitedPrivate("HDFS") @InterfaceStability.Evolving @@ -63,6 +64,12 @@ public class ZKFCRpcServer implements ZKFCProtocol { // set service-level authorization security policy if (conf.getBoolean( CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { + if (policy == null) { + throw new HadoopIllegalArgumentException( + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION + + "is configured to true but service-level" + + "authorization security policy is null."); + 
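A minimal illustration of what the new null check above protects against: once the flag below is enabled, the ZKFC's getPolicyProvider() must return a non-null provider or ZKFCRpcServer construction fails fast with HadoopIllegalArgumentException. Only the configuration side is sketched here.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.CommonConfigurationKeys;

  public class ZkfcAuthConfigExample {                 // hypothetical example class
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // With service-level authorization on, ZKFCRpcServer now rejects a null
      // PolicyProvider up front rather than failing inside refreshServiceAcl().
      conf.setBoolean(
          CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    }
  }
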
} server.refreshServiceAcl(conf, policy); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java index ee4ca1a6084a9..3718b7cdb0cc0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java @@ -157,7 +157,10 @@ public HAServiceTarget getLocalTarget() { return localTarget; } - HAServiceState getServiceState() { return serviceState; } + @VisibleForTesting + public HAServiceState getServiceState() { + return serviceState; + } public int run(final String[] args) throws Exception { if (!localTarget.isAutoFailoverEnabled()) { @@ -315,9 +318,10 @@ private void initHM() { healthMonitor.addServiceStateCallback(new ServiceStateCallBacks()); healthMonitor.start(); } - + protected void initRPC() throws IOException { InetSocketAddress bindAddr = getRpcAddressToBindTo(); + LOG.info("ZKFC RpcServer binding to {}", bindAddr); rpcServer = new ZKFCRpcServer(conf, bindAddr, this, getPolicyProvider()); } @@ -799,7 +803,9 @@ private void recheckElectability() { switch (lastHealthState) { case SERVICE_HEALTHY: - elector.joinElection(targetToData(localTarget)); + if(serviceState != HAServiceState.OBSERVER) { + elector.joinElection(targetToData(localTarget)); + } if (quitElectionOnBadState) { quitElectionOnBadState = false; } @@ -909,7 +915,7 @@ protected synchronized void setLastHealthState(HealthMonitor.State newState) { } @VisibleForTesting - ActiveStandbyElector getElectorForTests() { + public ActiveStandbyElector getElectorForTests() { return elector; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java index fec519f3761d4..e53820cd13107 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java @@ -43,8 +43,8 @@ import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.UserGroupInformation; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; /** * This class is the client side translator to translate the requests made on diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java index 72787cfe9937a..8613a469779f3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java @@ -40,8 +40,8 @@ import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.RPC; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import 
org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java index 62896fa8e7418..7001d93995f0f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java @@ -34,8 +34,8 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class ZKFCProtocolClientSideTranslatorPB implements diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java index 549499885df41..f822200ab9fa0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java @@ -29,8 +29,8 @@ import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.RPC; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; @InterfaceAudience.Private @InterfaceStability.Stable diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index 7e7d64423f169..3fd74f0e89a27 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -154,6 +154,10 @@ public final class HttpServer2 implements FilterContainer { public static final String FILTER_INITIALIZER_PROPERTY = "hadoop.http.filter.initializers"; + public static final String HTTP_SNI_HOST_CHECK_ENABLED_KEY + = "hadoop.http.sni.host.check.enabled"; + public static final boolean HTTP_SNI_HOST_CHECK_ENABLED_DEFAULT = false; + // The ServletContext attribute where the daemon Configuration // gets stored. public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf"; @@ -233,6 +237,8 @@ public static class Builder { private boolean xFrameEnabled; private XFrameOption xFrameOption = XFrameOption.SAMEORIGIN; + private boolean sniHostCheckEnabled; + public Builder setName(String name){ this.name = name; return this; @@ -377,6 +383,17 @@ public Builder setXFrameOption(String option) { return this; } + /** + * Enable or disable sniHostCheck. + * + * @param sniHostCheckEnabled Enable sniHostCheck if true, else disable it. + * @return Builder. 
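To illustrate the new SNI host check wiring in HttpServer2 above: an explicit builder flag takes precedence, and the configuration key is only consulted when the flag is left unset. A hedged sketch; the webapp name and endpoint are placeholders, and a real HTTPS endpoint would additionally need keystore settings.

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.http.HttpServer2;

  public class SniHostCheckExample {                   // hypothetical example class
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Cluster-wide switch; consulted only when the builder flag is not set.
      conf.setBoolean(HttpServer2.HTTP_SNI_HOST_CHECK_ENABLED_KEY, true);

      HttpServer2.Builder builder = new HttpServer2.Builder()
          .setName("webapp")                           // illustrative name
          .addEndpoint(URI.create("https://0.0.0.0:0"))
          .setConf(conf)
          // Setting the flag here makes the configuration lookup irrelevant.
          .setSniHostCheckEnabled(true);
      // Keystore/truststore settings omitted; builder.build() would create the
      // HTTPS connector with SecureRequestCustomizer(sniHostCheckEnabled).
    }
  }
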
+ */ + public Builder setSniHostCheckEnabled(boolean sniHostCheckEnabled) { + this.sniHostCheckEnabled = sniHostCheckEnabled; + return this; + } + /** * A wrapper of {@link Configuration#getPassword(String)}. It returns * String instead of char[]. @@ -471,6 +488,13 @@ public HttpServer2 build() throws IOException { int backlogSize = conf.getInt(HTTP_SOCKET_BACKLOG_SIZE_KEY, HTTP_SOCKET_BACKLOG_SIZE_DEFAULT); + // If setSniHostCheckEnabled() is used to enable SNI hostname check, + // configuration lookup is skipped. + if (!sniHostCheckEnabled) { + sniHostCheckEnabled = conf.getBoolean(HTTP_SNI_HOST_CHECK_ENABLED_KEY, + HTTP_SNI_HOST_CHECK_ENABLED_DEFAULT); + } + for (URI ep : endpoints) { final ServerConnector connector; String scheme = ep.getScheme(); @@ -514,22 +538,29 @@ private ServerConnector createHttpChannelConnector( private ServerConnector createHttpsChannelConnector( Server server, HttpConfiguration httpConfig) { httpConfig.setSecureScheme(HTTPS_SCHEME); - httpConfig.addCustomizer(new SecureRequestCustomizer()); + httpConfig.addCustomizer( + new SecureRequestCustomizer(sniHostCheckEnabled)); ServerConnector conn = createHttpChannelConnector(server, httpConfig); SslContextFactory.Server sslContextFactory = new SslContextFactory.Server(); sslContextFactory.setNeedClientAuth(needsClientAuth); - sslContextFactory.setKeyManagerPassword(keyPassword); + if (keyPassword != null) { + sslContextFactory.setKeyManagerPassword(keyPassword); + } if (keyStore != null) { sslContextFactory.setKeyStorePath(keyStore); sslContextFactory.setKeyStoreType(keyStoreType); - sslContextFactory.setKeyStorePassword(keyStorePassword); + if (keyStorePassword != null) { + sslContextFactory.setKeyStorePassword(keyStorePassword); + } } if (trustStore != null) { sslContextFactory.setTrustStorePath(trustStore); sslContextFactory.setTrustStoreType(trustStoreType); - sslContextFactory.setTrustStorePassword(trustStorePassword); + if (trustStorePassword != null) { + sslContextFactory.setTrustStorePassword(trustStorePassword); + } } if(null != excludeCiphers && !excludeCiphers.isEmpty()) { sslContextFactory.setExcludeCipherSuites( diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java index 44b6aaa7af310..c6bc0536473e9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java @@ -18,8 +18,10 @@ package org.apache.hadoop.io; -import java.io.*; -import java.lang.reflect.Array; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Arrays; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -42,7 +44,7 @@ @InterfaceAudience.Public @InterfaceStability.Stable public class ArrayWritable implements Writable { - private Class valueClass; + private final Class valueClass; private Writable[] values; public ArrayWritable(Class valueClass) { @@ -64,7 +66,7 @@ public ArrayWritable(String[] strings) { } } - public Class getValueClass() { + public Class getValueClass() { return valueClass; } @@ -77,16 +79,16 @@ public String[] toStrings() { } public Object toArray() { - Object result = Array.newInstance(valueClass, values.length); - for (int i = 0; i < values.length; i++) { - Array.set(result, i, values[i]); - } - return result; + return 
Arrays.copyOf(values, values.length); } - public void set(Writable[] values) { this.values = values; } + public void set(Writable[] values) { + this.values = values; + } - public Writable[] get() { return values; } + public Writable[] get() { + return values; + } @Override public void readFields(DataInput in) throws IOException { @@ -106,5 +108,11 @@ public void write(DataOutput out) throws IOException { } } + @Override + public String toString() { + return "ArrayWritable [valueClass=" + valueClass + ", values=" + + Arrays.toString(values) + "]"; + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java index 7d7b75ba05a00..c5538c9e56e85 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java @@ -19,6 +19,9 @@ package org.apache.hadoop.io; import java.io.IOException; +import java.util.Arrays; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import java.io.DataInput; import java.io.DataOutput; @@ -35,17 +38,22 @@ @InterfaceStability.Stable public class BytesWritable extends BinaryComparable implements WritableComparable { + private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; private static final int LENGTH_BYTES = 4; - private static final byte[] EMPTY_BYTES = {}; + + private static final byte[] EMPTY_BYTES = new byte[0]; private int size; private byte[] bytes; - + /** * Create a zero-size sequence. */ - public BytesWritable() {this(EMPTY_BYTES);} - + public BytesWritable() { + this.bytes = EMPTY_BYTES; + this.size = 0; + } + /** * Create a BytesWritable using the byte array as the initial value. * @param bytes This array becomes the backing storage for the object. @@ -65,17 +73,15 @@ public BytesWritable(byte[] bytes, int length) { this.bytes = bytes; this.size = length; } - + /** * Get a copy of the bytes that is exactly the length of the data. * See {@link #getBytes()} for faster access to the underlying array. */ public byte[] copyBytes() { - byte[] result = new byte[size]; - System.arraycopy(bytes, 0, result, 0, size); - return result; + return Arrays.copyOf(bytes, size); } - + /** * Get the data backing the BytesWritable. Please use {@link #copyBytes()} * if you need the returned array to be precisely the length of the data. @@ -111,7 +117,7 @@ public int getLength() { public int getSize() { return getLength(); } - + /** * Change the size of the buffer. The values in the old range are preserved * and any new values are undefined. The capacity is changed if it is @@ -121,41 +127,37 @@ public int getSize() { public void setSize(int size) { if (size > getCapacity()) { // Avoid overflowing the int too early by casting to a long. - long newSize = Math.min(Integer.MAX_VALUE, (3L * size) / 2L); + long newSize = Math.min(MAX_ARRAY_SIZE, (3L * size) / 2L); setCapacity((int) newSize); } this.size = size; } - + /** * Get the capacity, which is the maximum size that could handled without * resizing the backing storage. + * * @return The number of bytes */ public int getCapacity() { return bytes.length; } - + /** - * Change the capacity of the backing storage. - * The data is preserved. - * @param new_cap The new capacity in bytes. + * Change the capacity of the backing storage. The data is preserved. + * + * @param capacity The new capacity in bytes. 
*/ - public void setCapacity(int new_cap) { - if (new_cap != getCapacity()) { - byte[] new_data = new byte[new_cap]; - if (new_cap < size) { - size = new_cap; - } - if (size != 0) { - System.arraycopy(bytes, 0, new_data, 0, size); - } - bytes = new_data; + public void setCapacity(final int capacity) { + if (capacity != getCapacity()) { + this.size = Math.min(size, capacity); + this.bytes = Arrays.copyOf(this.bytes, capacity); } } /** * Set the BytesWritable to the contents of the given newData. + * * @param newData the value to set this BytesWritable to. */ public void set(BytesWritable newData) { @@ -163,7 +165,8 @@ public void set(BytesWritable newData) { } /** - * Set the value to a copy of the given byte range + * Set the value to a copy of the given byte range. + * * @param newData the new values to copy in * @param offset the offset in newData to start at * @param length the number of bytes to copy @@ -174,25 +177,18 @@ public void set(byte[] newData, int offset, int length) { System.arraycopy(newData, offset, bytes, 0, size); } - // inherit javadoc @Override public void readFields(DataInput in) throws IOException { setSize(0); // clear the old data setSize(in.readInt()); in.readFully(bytes, 0, size); } - - // inherit javadoc + @Override public void write(DataOutput out) throws IOException { out.writeInt(size); out.write(bytes, 0, size); } - - @Override - public int hashCode() { - return super.hashCode(); - } /** * Are the two byte sequences equal? @@ -204,25 +200,19 @@ public boolean equals(Object right_obj) { return false; } + @Override + public int hashCode() { + return super.hashCode(); + } + /** * Generate the stream of bytes as hex pairs separated by ' '. */ @Override - public String toString() { - StringBuilder sb = new StringBuilder(3*size); - for (int idx = 0; idx < size; idx++) { - // if not the first, put a blank separator in - if (idx != 0) { - sb.append(' '); - } - String num = Integer.toHexString(0xff & bytes[idx]); - // if it is only one digit, add a leading 0. - if (num.length() < 2) { - sb.append('0'); - } - sb.append(num); - } - return sb.toString(); + public String toString() { + return IntStream.range(0, size) + .mapToObj(idx -> String.format("%02x", bytes[idx])) + .collect(Collectors.joining(" ")); } /** A Comparator optimized for BytesWritable. */ @@ -230,20 +220,20 @@ public static class Comparator extends WritableComparator { public Comparator() { super(BytesWritable.class); } - + /** * Compare the buffers in serialized form. 
*/ @Override public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { - return compareBytes(b1, s1+LENGTH_BYTES, l1-LENGTH_BYTES, - b2, s2+LENGTH_BYTES, l2-LENGTH_BYTES); + return compareBytes(b1, s1 + LENGTH_BYTES, l1 - LENGTH_BYTES, + b2, s2 + LENGTH_BYTES, l2 - LENGTH_BYTES); } } - + static { // register this comparator WritableComparator.define(BytesWritable.class, new Comparator()); } - + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java index 0f0f5c7405a6c..b35a32f288b4b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java @@ -30,7 +30,7 @@ import org.apache.hadoop.conf.*; import org.apache.hadoop.util.ProtoUtil; -import com.google.protobuf.Message; +import org.apache.hadoop.thirdparty.protobuf.Message; /** A polymorphic Writable that writes an instance with it's class name. * Handles arrays, strings and primitive types without a Writable wrapper. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java index 3ab327fe76a30..716de3deb4278 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java @@ -24,11 +24,11 @@ import java.nio.ByteBuffer; import java.nio.CharBuffer; import java.nio.charset.CharacterCodingException; -import java.nio.charset.Charset; import java.nio.charset.CharsetDecoder; import java.nio.charset.CharsetEncoder; import java.nio.charset.CodingErrorAction; import java.nio.charset.MalformedInputException; +import java.nio.charset.StandardCharsets; import java.text.CharacterIterator; import java.text.StringCharacterIterator; import java.util.Arrays; @@ -52,63 +52,67 @@ @InterfaceStability.Stable public class Text extends BinaryComparable implements WritableComparable { - + private static final ThreadLocal ENCODER_FACTORY = new ThreadLocal() { @Override protected CharsetEncoder initialValue() { - return Charset.forName("UTF-8").newEncoder(). + return StandardCharsets.UTF_8.newEncoder(). onMalformedInput(CodingErrorAction.REPORT). onUnmappableCharacter(CodingErrorAction.REPORT); } }; - + private static final ThreadLocal DECODER_FACTORY = new ThreadLocal() { @Override protected CharsetDecoder initialValue() { - return Charset.forName("UTF-8").newDecoder(). + return StandardCharsets.UTF_8.newDecoder(). onMalformedInput(CodingErrorAction.REPORT). onUnmappableCharacter(CodingErrorAction.REPORT); } }; - - private static final byte [] EMPTY_BYTES = new byte[0]; - - private byte[] bytes; - private int length; + private static final byte[] EMPTY_BYTES = new byte[0]; + + private byte[] bytes = EMPTY_BYTES; + private int length = 0; + + /** + * Construct an empty text string. + */ public Text() { - bytes = EMPTY_BYTES; } - /** Construct from a string. + /** + * Construct from a string. */ public Text(String string) { set(string); } - /** Construct from another text. */ + /** + * Construct from another text. + */ public Text(Text utf8) { set(utf8); } - /** Construct from a byte array. + /** + * Construct from a byte array. 
*/ public Text(byte[] utf8) { set(utf8); } - + /** * Get a copy of the bytes that is exactly the length of the data. * See {@link #getBytes()} for faster access to the underlying array. */ public byte[] copyBytes() { - byte[] result = new byte[length]; - System.arraycopy(bytes, 0, result, 0, length); - return result; + return Arrays.copyOf(bytes, length); } - + /** * Returns the raw bytes; however, only data up to {@link #getLength()} is * valid. Please use {@link #copyBytes()} if you @@ -119,12 +123,14 @@ public byte[] getBytes() { return bytes; } - /** Returns the number of bytes in the byte array */ + /** + * Returns the number of bytes in the byte array. + */ @Override public int getLength() { return length; } - + /** * Returns the Unicode Scalar Value (32-bit integer value) * for the character at position. Note that this @@ -136,15 +142,15 @@ public int getLength() { public int charAt(int position) { if (position > this.length) return -1; // too long if (position < 0) return -1; // duh. - + ByteBuffer bb = (ByteBuffer)ByteBuffer.wrap(bytes).position(position); return bytesToCodePoint(bb.slice()); } - + public int find(String what) { return find(what, 0); } - + /** * Finds any occurrence of what in the backing * buffer, starting as position start. The starting @@ -156,11 +162,11 @@ public int find(String what) { */ public int find(String what, int start) { try { - ByteBuffer src = ByteBuffer.wrap(this.bytes,0,this.length); + ByteBuffer src = ByteBuffer.wrap(this.bytes, 0, this.length); ByteBuffer tgt = encode(what); byte b = tgt.get(); src.position(start); - + while (src.hasRemaining()) { if (b == src.get()) { // matching first byte src.mark(); // save position in loop @@ -186,54 +192,63 @@ public int find(String what, int start) { } return -1; // not found } catch (CharacterCodingException e) { - // can't get here - e.printStackTrace(); - return -1; + throw new RuntimeException("Should not have happened", e); } - } - /** Set to contain the contents of a string. + } + + /** + * Set to contain the contents of a string. */ public void set(String string) { try { ByteBuffer bb = encode(string, true); bytes = bb.array(); length = bb.limit(); - }catch(CharacterCodingException e) { - throw new RuntimeException("Should not have happened ", e); + } catch (CharacterCodingException e) { + throw new RuntimeException("Should not have happened", e); } } - /** Set to a utf8 byte array + /** + * Set to a utf8 byte array. */ public void set(byte[] utf8) { set(utf8, 0, utf8.length); } - - /** copy a text. */ + + /** + * Copy a text. + */ public void set(Text other) { set(other.getBytes(), 0, other.getLength()); } /** - * Set the Text to range of bytes + * Set the Text to range of bytes. + * * @param utf8 the data to copy from * @param start the first position of the new string * @param len the number of bytes of the new string */ public void set(byte[] utf8, int start, int len) { - setCapacity(len, false); + ensureCapacity(len); System.arraycopy(utf8, start, bytes, 0, len); this.length = len; } /** - * Append a range of bytes to the end of the given text + * Append a range of bytes to the end of the given text. 
+ * * @param utf8 the data to copy from * @param start the first position to append from utf8 * @param len the number of bytes to append */ public void append(byte[] utf8, int start, int len) { - setCapacity(length + len, true); + byte[] original = bytes; + int capacity = Math.max(length + len, length + (length >> 1)); + if (ensureCapacity(capacity)) { + System.arraycopy(original, 0, bytes, 0, length); + } System.arraycopy(utf8, start, bytes, length, len); length += len; } @@ -250,47 +265,39 @@ public void clear() { length = 0; } - /* + /** * Sets the capacity of this Text object to at least - * len bytes. If the current buffer is longer, - * then the capacity and existing content of the buffer are - * unchanged. If len is larger - * than the current capacity, the Text object's capacity is - * increased to match. - * @param len the number of bytes we need - * @param keepData should the old data be kept - */ - private void setCapacity(int len, boolean keepData) { - if (bytes == null || bytes.length < len) { - if (bytes != null && keepData) { - bytes = Arrays.copyOf(bytes, Math.max(len,length << 1)); - } else { - bytes = new byte[len]; - } + * capacity bytes. If the current buffer is longer, then the + * capacity and existing content of the buffer are unchanged. If + * capacity is larger than the current capacity, the Text + * object's capacity is increased to match and any existing data is lost. + * + * @param capacity the number of bytes we need + * @return true if the internal array was resized or false otherwise + */ + private boolean ensureCapacity(final int capacity) { + if (bytes.length < capacity) { + bytes = new byte[capacity]; + return true; } + return false; } - - /** - * Convert text back to string - * @see java.lang.Object#toString() - */ + @Override public String toString() { try { return decode(bytes, 0, length); } catch (CharacterCodingException e) { - throw new RuntimeException("Should not have happened " , e); + throw new RuntimeException("Should not have happened", e); } } - - /** deserialize - */ + @Override public void readFields(DataInput in) throws IOException { int newLength = WritableUtils.readVInt(in); readWithKnownLength(in, newLength); } - + public void readFields(DataInput in, int maxLength) throws IOException { int newLength = WritableUtils.readVInt(in); if (newLength < 0) { @@ -303,7 +310,9 @@ public void readFields(DataInput in, int maxLength) throws IOException { readWithKnownLength(in, newLength); } - /** Skips over one Text in the input. */ + /** + * Skips over one Text in the input. + */ public static void skip(DataInput in) throws IOException { int length = WritableUtils.readVInt(in); WritableUtils.skipFully(in, length); @@ -315,14 +324,14 @@ public static void skip(DataInput in) throws IOException { * format. */ public void readWithKnownLength(DataInput in, int len) throws IOException { - setCapacity(len, false); + ensureCapacity(len); in.readFully(bytes, 0, len); length = len; } - /** serialize - * write this object to out - * length uses zero-compressed encoding + /** + * Serialize. Write this object to out length uses zero-compressed encoding. + * * @see Writable#write(DataOutput) */ @Override @@ -341,7 +350,10 @@ public void write(DataOutput out, int maxLength) throws IOException { out.write(bytes, 0, length); } - /** Returns true iff o is a Text with the same contents. */ + /** + * Returns true iff o is a Text with the same length and same + * contents. 
+ */ @Override public boolean equals(Object o) { if (o instanceof Text) @@ -365,7 +377,7 @@ public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { int n1 = WritableUtils.decodeVIntSize(b1[s1]); int n2 = WritableUtils.decodeVIntSize(b2[s2]); - return compareBytes(b1, s1+n1, l1-n1, b2, s2+n2, l2-n2); + return compareBytes(b1, s1 + n1, l1 - n1, b2, s2 + n2, l2 - n2); } } @@ -383,12 +395,12 @@ public int compare(byte[] b1, int s1, int l1, public static String decode(byte[] utf8) throws CharacterCodingException { return decode(ByteBuffer.wrap(utf8), true); } - + public static String decode(byte[] utf8, int start, int length) throws CharacterCodingException { return decode(ByteBuffer.wrap(utf8, start, length), true); } - + /** * Converts the provided byte array to a String using the * UTF-8 encoding. If replace is true, then @@ -400,7 +412,7 @@ public static String decode(byte[] utf8, int start, int length, boolean replace) throws CharacterCodingException { return decode(ByteBuffer.wrap(utf8, start, length), replace); } - + private static String decode(ByteBuffer utf8, boolean replace) throws CharacterCodingException { CharsetDecoder decoder = DECODER_FACTORY.get(); @@ -463,7 +475,7 @@ public static ByteBuffer encode(String string, boolean replace) public static String readString(DataInput in) throws IOException { return readString(in, Integer.MAX_VALUE); } - + /** Read a UTF8 encoded string with a maximum size */ public static String readString(DataInput in, int maxLength) @@ -473,8 +485,9 @@ public static String readString(DataInput in, int maxLength) in.readFully(bytes, 0, length); return decode(bytes); } - - /** Write a UTF8 encoded string to out + + /** + * Write a UTF8 encoded string to out. */ public static int writeString(DataOutput out, String s) throws IOException { ByteBuffer bytes = encode(s); @@ -484,7 +497,8 @@ public static int writeString(DataOutput out, String s) throws IOException { return length; } - /** Write a UTF8 encoded string with a maximum size to out + /** + * Write a UTF8 encoded string with a maximum size to out. */ public static int writeString(DataOutput out, String s, int maxLength) throws IOException { @@ -501,24 +515,26 @@ public static int writeString(DataOutput out, String s, int maxLength) } ////// states for validateUTF8 - + private static final int LEAD_BYTE = 0; private static final int TRAIL_BYTE_1 = 1; private static final int TRAIL_BYTE = 2; - /** - * Check if a byte array contains valid utf-8 + /** + * Check if a byte array contains valid UTF-8. + * * @param utf8 byte array - * @throws MalformedInputException if the byte array contains invalid utf-8 + * @throws MalformedInputException if the byte array contains invalid UTF-8 */ public static void validateUTF8(byte[] utf8) throws MalformedInputException { - validateUTF8(utf8, 0, utf8.length); + validateUTF8(utf8, 0, utf8.length); } - + /** - * Check to see if a byte array is valid utf-8 + * Check to see if a byte array is valid UTF-8. 
+ * * @param utf8 the array of bytes * @param start the offset of the first byte in the array * @param len the length of the byte sequence @@ -641,7 +657,6 @@ public static int bytesToCodePoint(ByteBuffer bytes) { return ch; } - static final int offsetsFromUTF8[] = { 0x00000000, 0x00003080, 0x000E2080, 0x03C82080, 0xFA082080, 0x82082080 }; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java index 99590eda679af..7fd5633daa698 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java @@ -236,7 +236,7 @@ public Decompressor createDecompressor() { */ @Override public String getDefaultExtension() { - return ".bz2"; + return CodecConstants.BZIP2_CODEC_EXTENSION; } private static class BZip2CompressionOutputStream extends diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecConstants.java new file mode 100644 index 0000000000000..96410a18ebcb5 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecConstants.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.io.compress; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Codec related constants. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class CodecConstants { + + private CodecConstants() { + } + /** + * Default extension for {@link org.apache.hadoop.io.compress.DefaultCodec}. + */ + public static final String DEFAULT_CODEC_EXTENSION = ".deflate"; + + /** + * Default extension for {@link org.apache.hadoop.io.compress.BZip2Codec}. + */ + public static final String BZIP2_CODEC_EXTENSION = ".bz2"; + + /** + * Default extension for {@link org.apache.hadoop.io.compress.GzipCodec}. + */ + public static final String GZIP_CODEC_EXTENSION = ".gz"; + + /** + * Default extension for {@link org.apache.hadoop.io.compress.Lz4Codec}. + */ + public static final String LZ4_CODEC_EXTENSION = ".lz4"; + + /** + * Default extension for + * {@link org.apache.hadoop.io.compress.PassthroughCodec}. + */ + public static final String PASSTHROUGH_CODEC_EXTENSION = ".passthrough"; + + /** + * Default extension for {@link org.apache.hadoop.io.compress.SnappyCodec}. 
+ */ + public static final String SNAPPY_CODEC_EXTENSION = ".snappy"; + + /** + * Default extension for {@link org.apache.hadoop.io.compress.ZStandardCodec}. + */ + public static final String ZSTANDARD_CODEC_EXTENSION = ".zst"; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java index 33f39ef9297fb..d2ffb22eaafb3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java @@ -114,7 +114,7 @@ public DirectDecompressor createDirectDecompressor() { @Override public String getDefaultExtension() { - return ".deflate"; + return CodecConstants.DEFAULT_CODEC_EXTENSION; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java index 9bd861da9e890..1535e8c3d386e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java @@ -206,7 +206,7 @@ public DirectDecompressor createDirectDecompressor() { @Override public String getDefaultExtension() { - return ".gz"; + return CodecConstants.GZIP_CODEC_EXTENSION; } static final class GzipZlibCompressor extends ZlibCompressor { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java index 45b5e9cdabd28..ba6b487150501 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java @@ -221,6 +221,6 @@ public Decompressor createDecompressor() { */ @Override public String getDefaultExtension() { - return ".lz4"; + return CodecConstants.LZ4_CODEC_EXTENSION; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java index a3f0bffeebc0f..074762c0e8f7a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java @@ -77,7 +77,8 @@ public class PassthroughCodec * This default extension is here so that if no extension has been defined, * some value is still returned: {@value}.. 
*/ - public static final String DEFAULT_EXTENSION = ".passthrough"; + public static final String DEFAULT_EXTENSION = + CodecConstants.PASSTHROUGH_CODEC_EXTENSION; private Configuration conf; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java index cd0c7880376bf..686f30c9f89a2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java @@ -225,6 +225,6 @@ public DirectDecompressor createDirectDecompressor() { */ @Override public String getDefaultExtension() { - return ".snappy"; + return CodecConstants.SNAPPY_CODEC_EXTENSION; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java index c56bbba3b5959..a7afebc0c49ae 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java @@ -230,7 +230,7 @@ public Decompressor createDecompressor() { */ @Override public String getDefaultExtension() { - return ".zst"; + return CodecConstants.ZSTANDARD_CODEC_EXTENSION; } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index 973afa33e3f35..160b8e029e56b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -224,28 +224,31 @@ public long getLength() { * JNI wrapper of persist memory operations. */ public static class Pmem { - // check whether the address is a Pmem address or DIMM address + // Check whether the address is a Pmem address or DIMM address public static boolean isPmem(long address, long length) { return NativeIO.POSIX.isPmemCheck(address, length); } - // create a pmem file and memory map it - public static PmemMappedRegion mapBlock(String path, long length) { - return NativeIO.POSIX.pmemCreateMapFile(path, length); + // Map a file in persistent memory, if the given file exists, + // directly map it. If not, create the named file on persistent memory + // and then map it. 
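A minimal caller sketch of the reworked Pmem.mapBlock introduced just below; it is not part of the patch. It assumes the PMDK native support is loaded, that Pmem and PmemMappedRegion are the nested classes under NativeIO.POSIX shown in this hunk, and that PmemMappedRegion exposes getAddress(), getLength() and isPmem() accessors; the cache path is hypothetical.

import java.io.File;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.nativeio.NativeIO.POSIX.Pmem;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.PmemMappedRegion;

public class PmemMapBlockSketch {
  public static void main(String[] args) {
    String path = "/mnt/pmem0/cache/blk_1001";   // hypothetical pmem-backed file
    long length = 4L * 1024 * 1024;
    boolean fileExists = new File(path).exists();

    // The old API always created the file; the new isFileExist flag lets an
    // existing file be mapped in place, e.g. to recover a cache after restart.
    PmemMappedRegion region = Pmem.mapBlock(path, length, fileExists);
    if (region == null) {
      return;                                    // mapping failed
    }
    byte[] data = "hello pmem".getBytes(StandardCharsets.UTF_8);
    Pmem.memCopy(data, region.getAddress(), region.isPmem(), data.length);
    Pmem.memSync(region);                        // flush to persistent media
    Pmem.unmapBlock(region.getAddress(), region.getLength());
  }
}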
+ public static PmemMappedRegion mapBlock( + String path, long length, boolean isFileExist) { + return NativeIO.POSIX.pmemMapFile(path, length, isFileExist); } - // unmap a pmem file + // Unmap a pmem file public static boolean unmapBlock(long address, long length) { return NativeIO.POSIX.pmemUnMap(address, length); } - // copy data from disk file(src) to pmem file(dest), without flush + // Copy data from disk file(src) to pmem file(dest), without flush public static void memCopy(byte[] src, long dest, boolean isPmem, long length) { NativeIO.POSIX.pmemCopy(src, dest, isPmem, length); } - // flush the memory content to persistent storage + // Flush the memory content to persistent storage public static void memSync(PmemMappedRegion region) { if (region.isPmem()) { NativeIO.POSIX.pmemDrain(); @@ -261,8 +264,8 @@ public static String getPmdkLibPath() { private static native String getPmdkLibPath(); private static native boolean isPmemCheck(long address, long length); - private static native PmemMappedRegion pmemCreateMapFile(String path, - long length); + private static native PmemMappedRegion pmemMapFile(String path, + long length, boolean isFileExist); private static native boolean pmemUnMap(long address, long length); private static native void pmemCopy(byte[] src, long dest, boolean isPmem, long length); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java index ba07db4c2ae5e..fcbcc868cf6dd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java @@ -34,6 +34,7 @@ import javax.security.sasl.SaslException; +import org.apache.hadoop.ipc.ObserverRetryOnActiveException; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.StandbyException; @@ -678,7 +679,7 @@ public RetryAction shouldRetry(Exception e, int retries, e instanceof UnknownHostException || e instanceof StandbyException || e instanceof ConnectTimeoutException || - isWrappedStandbyException(e)) { + shouldFailoverOnException(e)) { return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY, getFailoverOrRetrySleepTime(failovers)); } else if (e instanceof RetriableException @@ -689,7 +690,8 @@ public RetryAction shouldRetry(Exception e, int retries, } else if (e instanceof InvalidToken) { return new RetryAction(RetryAction.RetryDecision.FAIL, 0, "Invalid or Cancelled Token"); - } else if (e instanceof AccessControlException) { + } else if (e instanceof AccessControlException || + hasWrappedAccessControlException(e)) { return new RetryAction(RetryAction.RetryDecision.FAIL, 0, "Access denied"); } else if (e instanceof SocketException @@ -729,12 +731,13 @@ private static long calculateExponentialTime(long time, int retries) { return calculateExponentialTime(time, retries, Long.MAX_VALUE); } - private static boolean isWrappedStandbyException(Exception e) { + private static boolean shouldFailoverOnException(Exception e) { if (!(e instanceof RemoteException)) { return false; } Exception unwrapped = ((RemoteException)e).unwrapRemoteException( - StandbyException.class); + StandbyException.class, + ObserverRetryOnActiveException.class); return unwrapped instanceof StandbyException; } @@ -759,4 +762,13 @@ static RetriableException 
getWrappedRetriableException(Exception e) { return unwrapped instanceof RetriableException ? (RetriableException) unwrapped : null; } + + private static boolean hasWrappedAccessControlException(Exception e) { + Throwable throwable = e; + while (!(throwable instanceof AccessControlException) && + throwable.getCause() != null) { + throwable = throwable.getCause(); + } + return throwable instanceof AccessControlException; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java index 7e43974ba9cb3..c035a42d4a751 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java @@ -23,7 +23,7 @@ import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry; import org.apache.hadoop.ipc.RemoteException; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.ipc.RetriableException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java index 81b7d34d0d1e0..53ac34b61272f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java @@ -22,6 +22,7 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.AbstractQueue; +import java.util.Arrays; import java.util.Collection; import java.util.Iterator; import java.util.concurrent.BlockingQueue; @@ -77,8 +78,10 @@ public CallQueueManager(Class> backingClass, int priorityLevels = parseNumLevels(namespace, conf); this.scheduler = createScheduler(schedulerClass, priorityLevels, namespace, conf); + int[] capacityWeights = parseCapacityWeights(priorityLevels, + namespace, conf); BlockingQueue bq = createCallQueueInstance(backingClass, - priorityLevels, maxQueueSize, namespace, conf); + priorityLevels, maxQueueSize, namespace, capacityWeights, conf); this.clientBackOffEnabled = clientBackOffEnabled; this.serverFailOverEnabled = conf.getBoolean( namespace + "." + @@ -146,13 +149,14 @@ private static T createScheduler( private > T createCallQueueInstance( Class theClass, int priorityLevels, int maxLen, String ns, - Configuration conf) { + int[] capacityWeights, Configuration conf) { // Used for custom, configurable callqueues try { Constructor ctor = theClass.getDeclaredConstructor(int.class, - int.class, String.class, Configuration.class); - return ctor.newInstance(priorityLevels, maxLen, ns, conf); + int.class, String.class, int[].class, Configuration.class); + return ctor.newInstance(priorityLevels, maxLen, ns, + capacityWeights, conf); } catch (RuntimeException e) { throw e; } catch (InvocationTargetException e) { @@ -343,6 +347,47 @@ private static int parseNumLevels(String ns, Configuration conf) { return retval; } + /** + * Read the weights of capacity in callqueue and pass the value to + * callqueue constructions. + */ + private static int[] parseCapacityWeights( + int priorityLevels, String ns, Configuration conf) { + int[] weights = conf.getInts(ns + "." 
+ + CommonConfigurationKeys.IPC_CALLQUEUE_CAPACITY_WEIGHTS_KEY); + if (weights.length == 0) { + weights = getDefaultQueueCapacityWeights(priorityLevels); + } else if (weights.length != priorityLevels) { + throw new IllegalArgumentException( + CommonConfigurationKeys.IPC_CALLQUEUE_CAPACITY_WEIGHTS_KEY + " must " + + "specify " + priorityLevels + " capacity weights: one for each " + + "priority level"); + } else { + // only allow positive numbers + for (int w : weights) { + if (w <= 0) { + throw new IllegalArgumentException( + CommonConfigurationKeys.IPC_CALLQUEUE_CAPACITY_WEIGHTS_KEY + + " only takes positive weights. " + w + " capacity weight " + + "found"); + } + } + } + return weights; + } + + /** + * By default, queue capacity is the same for all priority levels. + * + * @param priorityLevels number of levels + * @return default weights + */ + public static int[] getDefaultQueueCapacityWeights(int priorityLevels) { + int[] weights = new int[priorityLevels]; + Arrays.fill(weights, 1); + return weights; + } + /** * Replaces active queue with the newly requested one and transfers * all calls to the newQ before returning. @@ -355,8 +400,9 @@ public synchronized void swapQueue( this.scheduler.stop(); RpcScheduler newScheduler = createScheduler(schedulerClass, priorityLevels, ns, conf); + int[] capacityWeights = parseCapacityWeights(priorityLevels, ns, conf); BlockingQueue newQ = createCallQueueInstance(queueClassToUse, - priorityLevels, maxSize, ns, conf); + priorityLevels, maxSize, ns, capacityWeights, conf); // Our current queue becomes the old queue BlockingQueue oldQ = putRef.get(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 358c0d7ac3448..688eed647c209 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -761,8 +761,17 @@ public Object run() throws IOException, InterruptedException { throw (IOException) new IOException(msg).initCause(ex); } } else { - LOG.warn("Exception encountered while connecting to " - + "the server : " + ex); + // With RequestHedgingProxyProvider, one rpc call will send multiple + // requests to all namenodes. After one request return successfully, + // all other requests will be interrupted. It's not a big problem, + // and should not print a warning log. 
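A stand-alone restatement of the pattern this comment describes, using plain SLF4J; the class and method names are made up and it is not part of the patch. The point is that an InterruptedIOException from a deliberately cancelled hedged request is logged at debug, while genuine connection failures keep the warning, as in the check that follows below.

import java.io.IOException;
import java.io.InterruptedIOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class HedgedConnectLogging {
  private static final Logger LOG =
      LoggerFactory.getLogger(HedgedConnectLogging.class);

  private HedgedConnectLogging() {
  }

  static void logConnectFailure(IOException ex) {
    if (ex instanceof InterruptedIOException) {
      // Hedged requests are interrupted on purpose once a sibling request
      // wins, so this is expected noise and only worth a debug line.
      LOG.debug("Exception encountered while connecting to the server", ex);
    } else {
      LOG.warn("Exception encountered while connecting to the server", ex);
    }
  }
}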
+ if (ex instanceof InterruptedIOException) { + LOG.debug("Exception encountered while connecting to the server", + ex); + } else { + LOG.warn("Exception encountered while connecting to the server ", + ex); + } } if (ex instanceof RemoteException) throw (RemoteException) ex; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index ffeafb5c0dc70..3e952eb63c3ff 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -42,6 +42,7 @@ import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.ipc.metrics.DecayRpcSchedulerDetailedMetrics; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; @@ -154,6 +155,10 @@ public class DecayRpcScheduler implements RpcScheduler, private final AtomicDoubleArray responseTimeAvgInLastWindow; private final AtomicLongArray responseTimeCountInLastWindow; + // RPC queue time rates per queue + private final DecayRpcSchedulerDetailedMetrics + decayRpcSchedulerDetailedMetrics; + // Pre-computed scheduling decisions during the decay sweep are // atomically swapped in as a read-only map private final AtomicReference> scheduleCacheRef = @@ -236,6 +241,10 @@ public DecayRpcScheduler(int numLevels, String ns, Configuration conf) { Preconditions.checkArgument(topUsersCount > 0, "the number of top users for scheduler metrics must be at least 1"); + decayRpcSchedulerDetailedMetrics = + DecayRpcSchedulerDetailedMetrics.create(ns); + decayRpcSchedulerDetailedMetrics.init(numLevels); + // Setup delay timer Timer timer = new Timer(true); DecayTask task = new DecayTask(this, timer); @@ -626,6 +635,11 @@ public void addResponseTime(String callName, Schedulable schedulable, long queueTime = details.get(Timing.QUEUE, TimeUnit.MILLISECONDS); long processingTime = details.get(Timing.PROCESSING, TimeUnit.MILLISECONDS); + this.decayRpcSchedulerDetailedMetrics.addQueueTime( + priorityLevel, queueTime); + this.decayRpcSchedulerDetailedMetrics.addProcessingTime( + priorityLevel, processingTime); + responseTimeCountInCurrWindow.getAndIncrement(priorityLevel); responseTimeTotalInCurrWindow.getAndAdd(priorityLevel, queueTime+processingTime); @@ -987,9 +1001,16 @@ private Map getDecayedCallCosts() { return decayedCallCosts; } + @VisibleForTesting + public DecayRpcSchedulerDetailedMetrics + getDecayRpcSchedulerDetailedMetrics() { + return decayRpcSchedulerDetailedMetrics; + } + @Override public void stop() { metricsProxy.unregisterSource(namespace); MetricsProxy.removeInstance(namespace); + decayRpcSchedulerDetailedMetrics.shutdown(); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java index d15a71000bd54..939149fcc5e57 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java @@ -80,17 +80,27 @@ private void signalNotEmpty() { /* Failover if queue is filled up */ private boolean 
serverFailOverEnabled; + + @VisibleForTesting + public FairCallQueue(int priorityLevels, int capacity, String ns, + Configuration conf) { + this(priorityLevels, capacity, ns, + CallQueueManager.getDefaultQueueCapacityWeights(priorityLevels), conf); + } + /** * Create a FairCallQueue. * @param capacity the total size of all sub-queues * @param ns the prefix to use for configuration + * @param capacityWeights the weights array for capacity allocation + * among subqueues * @param conf the configuration to read from * Notes: Each sub-queue has a capacity of `capacity / numSubqueues`. * The first or the highest priority sub-queue has an excess capacity * of `capacity % numSubqueues` */ public FairCallQueue(int priorityLevels, int capacity, String ns, - Configuration conf) { + int[] capacityWeights, Configuration conf) { if(priorityLevels < 1) { throw new IllegalArgumentException("Number of Priority Levels must be " + "at least 1"); @@ -101,11 +111,18 @@ public FairCallQueue(int priorityLevels, int capacity, String ns, this.queues = new ArrayList>(numQueues); this.overflowedCalls = new ArrayList(numQueues); - int queueCapacity = capacity / numQueues; - int capacityForFirstQueue = queueCapacity + (capacity % numQueues); + int totalWeights = 0; + for (int i = 0; i < capacityWeights.length; i++) { + totalWeights += capacityWeights[i]; + } + int residueCapacity = capacity % totalWeights; + int unitCapacity = capacity / totalWeights; + int queueCapacity; for(int i=0; i < numQueues; i++) { + queueCapacity = unitCapacity * capacityWeights[i]; if (i == 0) { - this.queues.add(new LinkedBlockingQueue(capacityForFirstQueue)); + this.queues.add(new LinkedBlockingQueue( + queueCapacity + residueCapacity)); } else { this.queues.add(new LinkedBlockingQueue(queueCapacity)); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java index 336b304f2d084..80e2898155a29 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java @@ -20,8 +20,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import java.io.IOException; - /** * Thrown by a remote ObserverNode indicating the operation has failed and the * client should retry active namenode directly (instead of retry other @@ -29,7 +27,7 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class ObserverRetryOnActiveException extends IOException { +public class ObserverRetryOnActiveException extends StandbyException { static final long serialVersionUID = 1L; public ObserverRetryOnActiveException(String msg) { super(msg); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java index e30f28a698a43..bb86cfc35bf4e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java @@ -18,10 +18,16 @@ package org.apache.hadoop.ipc; import java.io.IOException; +import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.classification.InterfaceAudience; 
+import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.ByteString; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; /** * Helper methods for protobuf related RPC implementation @@ -46,4 +52,67 @@ public static IOException getRemoteException(ServiceException se) { } return e instanceof IOException ? (IOException) e : new IOException(se); } + + + /** + * Map used to cache fixed strings to ByteStrings. Since there is no + * automatic expiration policy, only use this for strings from a fixed, small + * set. + *

+ * This map should not be accessed directly. Used the getFixedByteString + * methods instead. + */ + private final static ConcurrentHashMap + FIXED_BYTESTRING_CACHE = new ConcurrentHashMap<>(); + + /** + * Get the ByteString for frequently used fixed and small set strings. + * @param key string + * @return + */ + public static ByteString getFixedByteString(Text key) { + ByteString value = FIXED_BYTESTRING_CACHE.get(key); + if (value == null) { + value = ByteString.copyFromUtf8(key.toString()); + FIXED_BYTESTRING_CACHE.put(new Text(key.copyBytes()), value); + } + return value; + } + + /** + * Get the ByteString for frequently used fixed and small set strings. + * @param key string + * @return + */ + public static ByteString getFixedByteString(String key) { + ByteString value = FIXED_BYTESTRING_CACHE.get(key); + if (value == null) { + value = ByteString.copyFromUtf8(key); + FIXED_BYTESTRING_CACHE.put(key, value); + } + return value; + } + + public static ByteString getByteString(byte[] bytes) { + // return singleton to reduce object allocation + return (bytes.length == 0) ? ByteString.EMPTY : ByteString.copyFrom(bytes); + } + + public static Token tokenFromProto( + TokenProto tokenProto) { + Token token = new Token<>( + tokenProto.getIdentifier().toByteArray(), + tokenProto.getPassword().toByteArray(), new Text(tokenProto.getKind()), + new Text(tokenProto.getService())); + return token; + } + + public static TokenProto protoFromToken(Token tok) { + TokenProto.Builder builder = TokenProto.newBuilder(). + setIdentifier(getByteString(tok.getIdentifier())). + setPassword(getByteString(tok.getPassword())). + setKindBytes(getFixedByteString(tok.getKind())). + setServiceBytes(getFixedByteString(tok.getService())); + return builder.build(); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java index c6b3fded7c731..14b356f847acf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java @@ -19,8 +19,8 @@ package org.apache.hadoop.ipc; import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.*; -import com.google.protobuf.Descriptors.MethodDescriptor; +import org.apache.hadoop.thirdparty.protobuf.*; +import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability.Unstable; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java index 463593e2abce3..50b70ca4bec1a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ipc; -import com.google.protobuf.Message; +import org.apache.hadoop.thirdparty.protobuf.Message; public interface ProtobufRpcEngineCallback { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoServerSideTranslatorPB.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoServerSideTranslatorPB.java index d9d80a84d860d..a78892e7efeed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoServerSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoServerSideTranslatorPB.java @@ -25,8 +25,8 @@ import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto; import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolVersionProto; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; /** * This class serves the requests for protocol versions and signatures by diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java index fbafabcde4a6d..99eb487be495c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.ipc; +import com.google.common.base.Joiner; import java.io.Closeable; import java.io.IOException; import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.Proxy; @@ -74,7 +76,8 @@ public static T combine(Class combinedProxyInterface, + combinedProxyInterface + " do not cover method " + m); } - InvocationHandler handler = new CombinedProxyInvocationHandler(proxies); + InvocationHandler handler = + new CombinedProxyInvocationHandler(combinedProxyInterface, proxies); return (T) Proxy.newProxyInstance(combinedProxyInterface.getClassLoader(), new Class[] {combinedProxyInterface}, handler); } @@ -82,9 +85,12 @@ public static T combine(Class combinedProxyInterface, private static final class CombinedProxyInvocationHandler implements RpcInvocationHandler { + private final Class proxyInterface; private final Object[] proxies; - private CombinedProxyInvocationHandler(Object[] proxies) { + private CombinedProxyInvocationHandler(Class proxyInterface, + Object[] proxies) { + this.proxyInterface = proxyInterface; this.proxies = proxies; } @@ -97,6 +103,8 @@ public Object invoke(Object proxy, Method method, Object[] args) return method.invoke(underlyingProxy, args); } catch (IllegalAccessException|IllegalArgumentException e) { lastException = e; + } catch (InvocationTargetException ite) { + throw ite.getCause(); } } // This shouldn't happen since the method coverage was verified in build() @@ -116,6 +124,12 @@ public ConnectionId getConnectionId() { return RPC.getConnectionIdForProxy(proxies[0]); } + @Override + public String toString() { + return "CombinedProxy[" + proxyInterface.getSimpleName() + "][" + + Joiner.on(",").join(proxies) + "]"; + } + @Override public void close() throws IOException { MultipleIOException.Builder exceptionBuilder = diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java index 0be5cb5fc0fc4..4f95863b03db6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java @@ -59,7 +59,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; -import com.google.protobuf.BlockingService; +import org.apache.hadoop.thirdparty.protobuf.BlockingService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java index 6f6ceb5a6caa4..4bde261eab1b0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java @@ -290,7 +290,7 @@ private CacheEntry waitForCompletion(CacheEntry newEntry) { Thread.currentThread().interrupt(); } } - // Previous request has failed, the expectation is is that it will be + // Previous request has failed, the expectation is that it will be // retried again. if (mapEntry.state != CacheEntry.SUCCESS) { mapEntry.state = CacheEntry.INPROGRESS; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java index da1e69965a901..84ecba1d34e9c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java @@ -32,8 +32,8 @@ import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto; import org.apache.hadoop.net.NetUtils; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; /** * This class maintains a cache of protocol versions and corresponding protocol diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java index a97af87bdfb01..6604bd0cc1c68 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java @@ -29,9 +29,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Writable; -import com.google.protobuf.CodedInputStream; -import com.google.protobuf.CodedOutputStream; -import com.google.protobuf.Message; +import org.apache.hadoop.thirdparty.protobuf.CodedInputStream; +import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream; +import org.apache.hadoop.thirdparty.protobuf.Message; // note anything marked public is solely for access by SaslRpcClient @InterfaceAudience.Private diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 36785e147d757..4448164f4b137 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -124,9 +124,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.ByteString; -import 
com.google.protobuf.CodedOutputStream; -import com.google.protobuf.Message; +import org.apache.hadoop.thirdparty.protobuf.ByteString; +import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream; +import org.apache.hadoop.thirdparty.protobuf.Message; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -382,20 +382,28 @@ public static InetAddress getRemoteIp() { /** * Returns the SASL qop for the current call, if the current call is - * set, and the SASL negotiation is done. Otherwise return null. Note - * that CurCall is thread local object. So in fact, different handler - * threads will process different CurCall object. + * set, and the SASL negotiation is done. Otherwise return null + * Note this only returns established QOP for auxiliary port, and + * returns null for primary (non-auxiliary) port. + * + * Also note that CurCall is thread local object. So in fact, different + * handler threads will process different CurCall object. * * Also, only return for RPC calls, not supported for other protocols. * @return the QOP of the current connection. */ - public static String getEstablishedQOP() { + public static String getAuxiliaryPortEstablishedQOP() { Call call = CurCall.get(); - if (call == null || !(call instanceof RpcCall)) { + if (!(call instanceof RpcCall)) { return null; } RpcCall rpcCall = (RpcCall)call; - return rpcCall.connection.getEstablishedQOP(); + if (rpcCall.connection.isOnAuxiliaryPort()) { + return rpcCall.connection.getEstablishedQOP(); + } else { + // Not sending back QOP for primary port + return null; + } } /** @@ -751,8 +759,8 @@ public static class Call implements Schedulable, private volatile String detailedMetricsName = ""; final int callId; // the client's call id final int retryCount; // the retry count of the call - long timestampNanos; // time received when response is null - // time served when response is not null + long timestampNanos; // time the call was received + long responseTimestampNanos; // time the call was served private AtomicInteger responseWaitCount = new AtomicInteger(1); final RPC.RpcKind rpcKind; final byte[] clientId; @@ -789,6 +797,7 @@ public Call(int id, int retryCount, Void ignore1, Void ignore2, this.callId = id; this.retryCount = retryCount; this.timestampNanos = Time.monotonicNowNanos(); + this.responseTimestampNanos = timestampNanos; this.rpcKind = kind; this.clientId = clientId; this.traceScope = traceScope; @@ -1184,7 +1193,8 @@ private class Listener extends Thread { private boolean reuseAddr = conf.getBoolean( CommonConfigurationKeysPublic.IPC_SERVER_REUSEADDR_KEY, CommonConfigurationKeysPublic.IPC_SERVER_REUSEADDR_DEFAULT); - + private boolean isOnAuxiliaryPort; + Listener(int port) throws IOException { address = new InetSocketAddress(bindAddress, port); // Create a new server socket and set to non blocking mode @@ -1212,6 +1222,11 @@ private class Listener extends Thread { acceptChannel.register(selector, SelectionKey.OP_ACCEPT); this.setName("IPC Server listener on " + port); this.setDaemon(true); + this.isOnAuxiliaryPort = false; + } + + void setIsAuxiliary() { + this.isOnAuxiliaryPort = true; } private class Reader extends Thread { @@ -1380,11 +1395,12 @@ void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOf channel.socket().setKeepAlive(true); Reader reader = getReader(); - Connection c = connectionManager.register(channel, this.listenPort); + Connection c = connectionManager.register(channel, + this.listenPort, this.isOnAuxiliaryPort); // If the connectionManager can't take it, close 
the connection. if (c == null) { if (channel.isOpen()) { - IOUtils.cleanup(null, channel); + IOUtils.cleanupWithLogger(LOG, channel); } connectionManager.droppedConnections.getAndIncrement(); continue; @@ -1591,7 +1607,7 @@ private void doPurge(RpcCall call, long now) { Iterator iter = responseQueue.listIterator(0); while (iter.hasNext()) { call = iter.next(); - if (now > call.timestampNanos + PURGE_INTERVAL_NANOS) { + if (now > call.responseTimestampNanos + PURGE_INTERVAL_NANOS) { closeConnection(call.connection); break; } @@ -1655,7 +1671,7 @@ private boolean processResponse(LinkedList responseQueue, if (inHandler) { // set the serve time when the response has to be sent later - call.timestampNanos = Time.monotonicNowNanos(); + call.responseTimestampNanos = Time.monotonicNowNanos(); incPending(); try { @@ -1778,7 +1794,7 @@ public class Connection { private SocketChannel channel; private ByteBuffer data; - private ByteBuffer dataLengthBuffer; + private final ByteBuffer dataLengthBuffer; private LinkedList responseQueue; // number of outstanding rpcs private AtomicInteger rpcCount = new AtomicInteger(); @@ -1804,6 +1820,7 @@ public class Connection { private int serviceClass; private boolean shouldClose = false; private int ingressPort; + private boolean isOnAuxiliaryPort; UserGroupInformation user = null; public UserGroupInformation attemptingUser = null; // user name before auth @@ -1816,7 +1833,7 @@ public class Connection { private boolean useWrap = false; public Connection(SocketChannel channel, long lastContact, - int ingressPort) { + int ingressPort, boolean isOnAuxiliaryPort) { this.channel = channel; this.lastContact = lastContact; this.data = null; @@ -1829,6 +1846,7 @@ public Connection(SocketChannel channel, long lastContact, this.socket = channel.socket(); this.addr = socket.getInetAddress(); this.ingressPort = ingressPort; + this.isOnAuxiliaryPort = isOnAuxiliaryPort; if (addr == null) { this.hostAddress = "*Unknown*"; } else { @@ -1874,7 +1892,11 @@ public InetAddress getHostInetAddress() { public String getEstablishedQOP() { return establishedQOP; } - + + public boolean isOnAuxiliaryPort() { + return isOnAuxiliaryPort; + } + public void setLastContact(long lastContact) { this.lastContact = lastContact; } @@ -2816,16 +2838,15 @@ public void setServiceClass(int serviceClass) { private synchronized void close() { disposeSasl(); data = null; - dataLengthBuffer = null; if (!channel.isOpen()) return; try {socket.shutdownOutput();} catch(Exception e) { LOG.debug("Ignoring socket shutdown exception", e); } if (channel.isOpen()) { - IOUtils.cleanup(null, channel); + IOUtils.cleanupWithLogger(LOG, channel); } - IOUtils.cleanup(null, socket); + IOUtils.cleanupWithLogger(LOG, socket); } } @@ -3112,6 +3133,8 @@ public synchronized void addAuxiliaryListener(int auxiliaryPort) "There is already a listener binding to: " + auxiliaryPort); } Listener newListener = new Listener(auxiliaryPort); + newListener.setIsAuxiliary(); + // in the case of port = 0, the listener would be on a != 0 port. 
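As a brief aside on how the auxiliary-port plumbing above is meant to be used (this sketch is not part of the patch; the server variable and the example port are assumptions, while addAuxiliaryListener and getAuxiliaryPortEstablishedQOP are the methods touched in these hunks):

    // server is an already-built org.apache.hadoop.ipc.Server; exception handling elided.
    server.addAuxiliaryListener(10020);              // extra listener, marked auxiliary
    // later, inside a handler thread servicing an RPC call:
    String qop = Server.getAuxiliaryPortEstablishedQOP();
    if (qop != null) {
      // the call arrived on an auxiliary port with a negotiated SASL QOP, e.g. "auth-conf"
    } else {
      // primary-port call, or no SASL QOP was established
    }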
LOG.info("Adding a server listener on port " + newListener.getAddress().getPort()); @@ -3731,11 +3754,13 @@ Connection[] toArray() { return connections.toArray(new Connection[0]); } - Connection register(SocketChannel channel, int ingressPort) { + Connection register(SocketChannel channel, int ingressPort, + boolean isOnAuxiliaryPort) { if (isFull()) { return null; } - Connection connection = new Connection(channel, Time.now(), ingressPort); + Connection connection = new Connection(channel, Time.now(), + ingressPort, isOnAuxiliaryPort); add(connection); if (LOG.isDebugEnabled()) { LOG.debug("Server connection from " + connection + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/DecayRpcSchedulerDetailedMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/DecayRpcSchedulerDetailedMetrics.java new file mode 100644 index 0000000000000..04a6c0eab1c42 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/DecayRpcSchedulerDetailedMetrics.java @@ -0,0 +1,135 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ipc.metrics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * This class is for maintaining queue (priority) level related + * statistics when FairCallQueue is used and publishing them + * through the metrics interface. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +@Metrics(about="Per queue(priority) metrics", + context="decayrpcschedulerdetailed") +public class DecayRpcSchedulerDetailedMetrics { + + @Metric private MutableRatesWithAggregation rpcQueueRates; + @Metric private MutableRatesWithAggregation rpcProcessingRates; + + private static final Logger LOG = + LoggerFactory.getLogger(DecayRpcSchedulerDetailedMetrics.class); + private final MetricsRegistry registry; + private final String name; + private String[] queueNamesForLevels; + private String[] processingNamesForLevels; + + DecayRpcSchedulerDetailedMetrics(String ns) { + name = "DecayRpcSchedulerDetailedMetrics."+ ns; + registry = new MetricsRegistry("decayrpcschedulerdetailed") + .tag("port", "RPC port", String.valueOf(ns)); + LOG.debug(registry.info().toString()); + } + + public static DecayRpcSchedulerDetailedMetrics create(String ns) { + DecayRpcSchedulerDetailedMetrics m = + new DecayRpcSchedulerDetailedMetrics(ns); + return DefaultMetricsSystem.instance().register(m.name, null, m); + } + + /** + * Initialize the metrics for JMX with priority levels. + */ + public void init(int numLevels) { + LOG.info("Initializing RPC stats for {} priority levels", numLevels); + queueNamesForLevels = new String[numLevels]; + processingNamesForLevels = new String[numLevels]; + for (int i = 0; i < numLevels; i++) { + queueNamesForLevels[i] = getQueueName(i+1); + processingNamesForLevels[i] = getProcessingName(i+1); + } + rpcQueueRates.init(queueNamesForLevels); + rpcProcessingRates.init(processingNamesForLevels); + } + + /** + * Instrument a Call queue time based on its priority. + * + * @param priority of the RPC call + * @param queueTime of the RPC call in the queue of the priority + */ + public void addQueueTime(int priority, long queueTime) { + rpcQueueRates.add(queueNamesForLevels[priority], queueTime); + } + + /** + * Instrument a Call processing time based on its priority. + * + * @param priority of the RPC call + * @param processingTime of the RPC call in the queue of the priority + */ + public void addProcessingTime(int priority, long processingTime) { + rpcProcessingRates.add(processingNamesForLevels[priority], processingTime); + } + + /** + * Shutdown the instrumentation process. + */ + public void shutdown() { + DefaultMetricsSystem.instance().unregisterSource(name); + } + + /** + * Returns the rate name inside the metric. + */ + public String getQueueName(int priority) { + return "DecayRPCSchedulerPriority."+priority+".RpcQueueTime"; + } + + /** + * Returns the rate name inside the metric. 
+ */ + public String getProcessingName(int priority) { + return "DecayRPCSchedulerPriority."+priority+".RpcProcessingTime"; + } + + public String getName() { + return name; + } + + @VisibleForTesting + MutableRatesWithAggregation getRpcQueueRates() { + return rpcQueueRates; + } + + @VisibleForTesting + MutableRatesWithAggregation getRpcProcessingRates() { + return rpcProcessingRates; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolClientSideTranslatorPB.java index 078b2dbd5df7e..f8a3e25867f91 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolClientSideTranslatorPB.java @@ -35,8 +35,8 @@ import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseProto; import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseCollectionProto; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class GenericRefreshProtocolClientSideTranslatorPB implements ProtocolMetaInterface, GenericRefreshProtocol, Closeable { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java index ae57cbd367c38..4af72fe04e1b2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java @@ -28,8 +28,8 @@ import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseProto; import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseCollectionProto; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class GenericRefreshProtocolServerSideTranslatorPB implements GenericRefreshProtocolPB { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolClientSideTranslatorPB.java index d18df529b084f..e378a93f87906 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolClientSideTranslatorPB.java @@ -29,8 +29,8 @@ import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueRequestProto; import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolPB; -import com.google.protobuf.RpcController; -import 
com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class RefreshCallQueueProtocolClientSideTranslatorPB implements ProtocolMetaInterface, RefreshCallQueueProtocol, Closeable { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolServerSideTranslatorPB.java index eb0301165e104..411df9f5674ce 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolServerSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolServerSideTranslatorPB.java @@ -24,8 +24,8 @@ import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueRequestProto; import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueResponseProto; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class RefreshCallQueueProtocolServerSideTranslatorPB implements RefreshCallQueueProtocolPB { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java index 528211913d606..cdd0ba4275ce2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java @@ -106,7 +106,7 @@ MBeanInfo get() { } ++curRecNo; } - MetricsSystemImpl.LOG.debug(attrs.toString()); + MetricsSystemImpl.LOG.debug("{}", attrs); MBeanAttributeInfo[] attrsArray = new MBeanAttributeInfo[attrs.size()]; return new MBeanInfo(name, description, attrs.toArray(attrsArray), null, null, null); // no ops/ctors/notifications diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java index aa7b7596173e7..5fe0083aa5dce 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java @@ -79,6 +79,18 @@ public void init(Class protocol) { } } + /** + * Initialize the registry with all rate names passed in. + * This is an alternative to the above init function since this metric + * can be used more than just for rpc name. + * @param names the array of all rate names + */ + public void init(String[] names) { + for (String name : names) { + addMetricIfNotExists(name); + } + } + /** * Add a rate sample for a rate metric. 
* @param name of the rate metric diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java index 5f9afddc57e0d..f19a2be0b4195 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java @@ -31,6 +31,8 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.log.metrics.EventCounter; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsInfo; @@ -84,7 +86,7 @@ public synchronized void registerIfNeeded(){ final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); final List gcBeans = ManagementFactory.getGarbageCollectorMXBeans(); - final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean(); + private ThreadMXBean threadMXBean; final String processName, sessionId; private JvmPauseMonitor pauseMonitor = null; final ConcurrentHashMap gcInfoCache = @@ -92,9 +94,12 @@ public synchronized void registerIfNeeded(){ private GcTimeMonitor gcTimeMonitor = null; @VisibleForTesting - JvmMetrics(String processName, String sessionId) { + JvmMetrics(String processName, String sessionId, boolean useThreadMXBean) { this.processName = processName; this.sessionId = sessionId; + if (useThreadMXBean) { + this.threadMXBean = ManagementFactory.getThreadMXBean(); + } } public void setPauseMonitor(final JvmPauseMonitor pauseMonitor) { @@ -108,8 +113,15 @@ public void setGcTimeMonitor(GcTimeMonitor gcTimeMonitor) { public static JvmMetrics create(String processName, String sessionId, MetricsSystem ms) { + // Reload the configuration here instead of threading it in from outside, + // since updating every caller across many modules would be redundant; + // this method is called at most once per component (NN/DN/RM/NM/...), + // so the overall cost is negligible.
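One practical consequence worth spelling out, shown as a hedged sketch (the resource file name is hypothetical; the other names appear in this hunk or in the new metrics class above): because create() builds a fresh Configuration internally, the flag is only honored when it is visible through the default resources, not through a Configuration object the caller happens to hold.

    // Make the flag visible to the Configuration that create() builds, then register as usual.
    Configuration.addDefaultResource("jvm-metrics-site.xml");   // hypothetical extra resource
    JvmMetrics jvm = JvmMetrics.create("NameNode", "session-1",
        DefaultMetricsSystem.instance());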
+ boolean useThreadMXBean = new Configuration().getBoolean( + CommonConfigurationKeys.HADOOP_METRICS_JVM_USE_THREAD_MXBEAN, + CommonConfigurationKeys.HADOOP_METRICS_JVM_USE_THREAD_MXBEAN_DEFAULT); return ms.register(JvmMetrics.name(), JvmMetrics.description(), - new JvmMetrics(processName, sessionId)); + new JvmMetrics(processName, sessionId, useThreadMXBean)); } public static void reattach(MetricsSystem ms, JvmMetrics jvmMetrics) { @@ -137,7 +149,11 @@ public void getMetrics(MetricsCollector collector, boolean all) { .tag(SessionId, sessionId); getMemoryUsage(rb); getGcUsage(rb); - getThreadUsage(rb); + if (threadMXBean != null) { + getThreadUsage(rb); + } else { + getThreadUsageFromGroup(rb); + } getEventCounters(rb); } @@ -235,6 +251,39 @@ private void getThreadUsage(MetricsRecordBuilder rb) { .addGauge(ThreadsTerminated, threadsTerminated); } + private void getThreadUsageFromGroup(MetricsRecordBuilder rb) { + int threadsNew = 0; + int threadsRunnable = 0; + int threadsBlocked = 0; + int threadsWaiting = 0; + int threadsTimedWaiting = 0; + int threadsTerminated = 0; + ThreadGroup threadGroup = Thread.currentThread().getThreadGroup(); + Thread[] threads = new Thread[threadGroup.activeCount()]; + threadGroup.enumerate(threads); + for (Thread thread : threads) { + if (thread == null) { + // race protection + continue; + } + switch (thread.getState()) { + case NEW: threadsNew++; break; + case RUNNABLE: threadsRunnable++; break; + case BLOCKED: threadsBlocked++; break; + case WAITING: threadsWaiting++; break; + case TIMED_WAITING: threadsTimedWaiting++; break; + case TERMINATED: threadsTerminated++; break; + default: + } + } + rb.addGauge(ThreadsNew, threadsNew) + .addGauge(ThreadsRunnable, threadsRunnable) + .addGauge(ThreadsBlocked, threadsBlocked) + .addGauge(ThreadsWaiting, threadsWaiting) + .addGauge(ThreadsTimedWaiting, threadsTimedWaiting) + .addGauge(ThreadsTerminated, threadsTerminated); + } + private void getEventCounters(MetricsRecordBuilder rb) { rb.addCounter(LogFatal, EventCounter.getFatal()) .addCounter(LogError, EventCounter.getError()) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java index 061971cb6817f..531ad80f41722 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java @@ -19,7 +19,6 @@ package org.apache.hadoop.net; import com.google.common.net.InetAddresses; -import com.sun.istack.Nullable; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; @@ -36,6 +35,7 @@ import java.util.List; import java.util.Vector; +import javax.annotation.Nullable; import javax.naming.NamingException; import javax.naming.directory.Attributes; import javax.naming.directory.DirContext; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index d98254cb1ca25..c5a5b111b2b3d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -37,6 +37,7 @@ import java.net.UnknownHostException; import java.net.ConnectException; import java.nio.channels.SocketChannel; +import 
java.nio.channels.UnresolvedAddressException; import java.util.Map.Entry; import java.util.regex.Pattern; import java.util.*; @@ -534,6 +535,8 @@ public static void connect(Socket socket, } } catch (SocketTimeoutException ste) { throw new ConnectTimeoutException(ste.getMessage()); + } catch (UnresolvedAddressException uae) { + throw new UnknownHostException(uae.getMessage()); } // There is a very rare case allowed by the TCP specification, such that @@ -638,6 +641,22 @@ public static String getHostNameOfIP(String ipPort) { } } + /** + * Attempt to normalize the given string to "host:port" + * if it looks like "ip:port". + * + * @param ipPort a string that may be either ip:port or host:port. + * @return host:port + */ + public static String normalizeIP2HostName(String ipPort) { + if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) { + return ipPort; + } + + InetSocketAddress address = createSocketAddr(ipPort); + return getHostPortString(address); + } + /** * Return hostname without throwing exception. * The returned hostname String format is "hostname". diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java index 724cec37ba503..9f52fed9678b9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java @@ -31,6 +31,7 @@ import java.util.*; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; /** The class represents a cluster of computer with a tree hierarchical * network topology. @@ -486,10 +487,10 @@ public Node chooseRandom(final String scope, protected Node chooseRandom(final String scope, String excludedScope, final Collection excludedNodes) { if (excludedScope != null) { - if (scope.startsWith(excludedScope)) { + if (isChildScope(scope, excludedScope)) { return null; } - if (!excludedScope.startsWith(scope)) { + if (!isChildScope(excludedScope, scope)) { excludedScope = null; } } @@ -667,9 +668,12 @@ public int countNumOfAvailableNodes(String scope, if (node == null) { continue; } - if ((NodeBase.getPath(node) + NodeBase.PATH_SEPARATOR_STR) - .startsWith(scope + NodeBase.PATH_SEPARATOR_STR)) { - excludedCountInScope++; + if (isNodeInScope(node, scope)) { + if (node instanceof InnerNode) { + excludedCountInScope += ((InnerNode) node).getNumOfLeaves(); + } else { + excludedCountInScope++; + } } else { excludedCountOffScope++; } @@ -874,11 +878,33 @@ public void sortByDistance(Node reader, Node[] nodes, int activeLen) { * This method is called if the reader is a datanode, * so nonDataNodeReader flag is set to false. */ - sortByDistance(reader, nodes, activeLen, false); + sortByDistance(reader, nodes, activeLen, list -> Collections.shuffle(list)); } /** - * Sort nodes array by network distance to reader. + * Sort nodes array by network distance to reader with secondary sort. *

+ * In a three-level topology, a node can be either local, on the same rack, + * or on a different rack from the reader. Sorting the nodes based on network + * distance from the reader reduces network traffic and improves + * performance. + *

+ * As an additional twist, we also randomize the nodes at each network + * distance. This helps with load balancing when there is data skew. + * + * @param reader Node where data will be read + * @param nodes Available replicas with the requested data + * @param activeLen Number of active nodes at the front of the array + * @param secondarySort a secondary sorting strategy which can inject into + * that point from outside to help sort the same distance. + */ + public void sortByDistance(Node reader, T[] nodes, + int activeLen, Consumer> secondarySort){ + sortByDistance(reader, nodes, activeLen, secondarySort, false); + } + + /** + * Sort nodes array by network distance to reader with secondary sort. *

using network location. This is used when the reader * is not a datanode. Sorting the nodes based on network distance * from the reader reduces network traffic and improves @@ -895,7 +921,27 @@ public void sortByDistanceUsingNetworkLocation(Node reader, Node[] nodes, * This method is called if the reader is not a datanode, * so nonDataNodeReader flag is set to true. */ - sortByDistance(reader, nodes, activeLen, true); + sortByDistanceUsingNetworkLocation(reader, nodes, activeLen, + list -> Collections.shuffle(list)); + } + + /** + * Sort nodes array by network distance to reader. + *

using network location. This is used when the reader + * is not a datanode. Sorting the nodes based on network distance + * from the reader reduces network traffic and improves + * performance. + *

+ * + * @param reader Node where data will be read + * @param nodes Available replicas with the requested data + * @param activeLen Number of active nodes at the front of the array + * @param secondarySort a secondary sorting strategy which can inject into + * that point from outside to help sort the same distance. + */ + public void sortByDistanceUsingNetworkLocation(Node reader, + T[] nodes, int activeLen, Consumer> secondarySort) { + sortByDistance(reader, nodes, activeLen, secondarySort, true); } /** @@ -903,13 +949,15 @@ public void sortByDistanceUsingNetworkLocation(Node reader, Node[] nodes, *

* As an additional twist, we also randomize the nodes at each network * distance. This helps with load balancing when there is data skew. + * And it helps choose node with more fast storage type. * * @param reader Node where data will be read * @param nodes Available replicas with the requested data * @param activeLen Number of active nodes at the front of the array * @param nonDataNodeReader True if the reader is not a datanode */ - private void sortByDistance(Node reader, Node[] nodes, int activeLen, + private void sortByDistance(Node reader, T[] nodes, + int activeLen, Consumer> secondarySort, boolean nonDataNodeReader) { /** Sort weights for the nodes array */ int[] weights = new int[activeLen]; @@ -921,23 +969,26 @@ private void sortByDistance(Node reader, Node[] nodes, int activeLen, } } // Add weight/node pairs to a TreeMap to sort - TreeMap> tree = new TreeMap>(); + TreeMap> tree = new TreeMap<>(); for (int i=0; i list = tree.get(weight); + T node = nodes[i]; + List list = tree.get(weight); if (list == null) { list = Lists.newArrayListWithExpectedSize(1); tree.put(weight, list); } list.add(node); } - + // Sort nodes which have the same weight using secondarySort. int idx = 0; - for (List list: tree.values()) { + for (List list: tree.values()) { if (list != null) { Collections.shuffle(list, r); - for (Node n: list) { + if (secondarySort != null) { + secondarySort.accept(list); + } + for (T n: list) { nodes[idx] = n; idx++; } @@ -946,4 +997,33 @@ private void sortByDistance(Node reader, Node[] nodes, int activeLen, Preconditions.checkState(idx == activeLen, "Sorted the wrong number of nodes!"); } -} + + /** + * Checks whether one scope is contained in the other scope. + * @param parentScope the parent scope to check + * @param childScope the child scope which needs to be checked. + * @return true if childScope is contained within the parentScope + */ + protected static boolean isChildScope(final String parentScope, + final String childScope) { + String pScope = parentScope.endsWith(NodeBase.PATH_SEPARATOR_STR) ? + parentScope : parentScope + NodeBase.PATH_SEPARATOR_STR; + String cScope = childScope.endsWith(NodeBase.PATH_SEPARATOR_STR) ? + childScope : childScope + NodeBase.PATH_SEPARATOR_STR; + return pScope.startsWith(cScope); + } + + /** + * Checks whether a node belongs to the scope. + * @param node the node to check. + * @param scope scope to check. 
+ * @return true if node lies within the scope + */ + protected static boolean isNodeInScope(Node node, String scope) { + if (!scope.endsWith(NodeBase.PATH_SEPARATOR_STR)) { + scope += NodeBase.PATH_SEPARATOR_STR; + } + String nodeLocation = NodeBase.getPath(node) + NodeBase.PATH_SEPARATOR_STR; + return nodeLocation.startsWith(scope); + } +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java index 9da9ca2948f9e..cc14df8519c96 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java @@ -20,6 +20,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import java.util.regex.Pattern; + /** A base class that implements interface Node * */ @@ -38,6 +40,7 @@ public class NodeBase implements Node { protected String location; //string representation of this node's location protected int level; //which level of the tree the node resides protected Node parent; //its parent + private static final Pattern SLASHES = Pattern.compile("/+"); /** Default constructor */ public NodeBase() { @@ -160,12 +163,15 @@ public static String normalize(String path) { if (path.length() == 0) { return ROOT; } - + if (path.charAt(0) != PATH_SEPARATOR) { throw new IllegalArgumentException( "Network Location path does not start with " +PATH_SEPARATOR_STR+ ": "+path); } + + // Remove duplicated slashes. + path = SLASHES.matcher(path).replaceAll("/"); int len = path.length(); if (path.charAt(len-1) == PATH_SEPARATOR) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java index f489581843f6c..312a481f25a86 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java @@ -326,34 +326,36 @@ int select(SelectableChannel channel, int ops, long timeout) SelectionKey key = null; int ret = 0; + long timeoutLeft = timeout; try { while (true) { long start = (timeout == 0) ? 0 : Time.now(); key = channel.register(info.selector, ops); - ret = info.selector.select(timeout); + ret = info.selector.select(timeoutLeft); if (ret != 0) { return ret; } - if (Thread.currentThread().isInterrupted()) { - throw new InterruptedIOException("Interrupted while waiting for " - + "IO on channel " + channel + ". " + timeout - + " millis timeout left."); - } - /* Sometimes select() returns 0 much before timeout for * unknown reasons. So select again if required. */ if (timeout > 0) { - timeout -= Time.now() - start; - if (timeout <= 0) { - return 0; - } + timeoutLeft -= Time.now() - start; + timeoutLeft = Math.max(0, timeoutLeft); } + if (Thread.currentThread().isInterrupted()) { + throw new InterruptedIOException("Interrupted while waiting for " + + "IO on channel " + channel + ". 
Total timeout mills is " + + timeout + ", " + timeoutLeft + " millis timeout left."); + } + + if (timeoutLeft == 0) { + return 0; + } } } finally { if (key != null) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java index 37cf021d41c51..e71bf6d40dd21 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java @@ -18,7 +18,7 @@ package org.apache.hadoop.security; -import com.google.protobuf.ByteString; +import org.apache.hadoop.thirdparty.protobuf.ByteString; import java.io.BufferedInputStream; import java.io.DataInput; @@ -46,6 +46,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.proto.SecurityProtos.CredentialsKVProto; @@ -368,7 +369,7 @@ void writeProto(DataOutput out) throws IOException { CredentialsKVProto.Builder kv = CredentialsKVProto.newBuilder(). setAliasBytes(ByteString.copyFrom( e.getKey().getBytes(), 0, e.getKey().getLength())). - setToken(e.getValue().toTokenProto()); + setToken(ProtobufHelper.protoFromToken(e.getValue())); storage.addTokens(kv.build()); } @@ -390,7 +391,7 @@ void readProto(DataInput in) throws IOException { CredentialsProto storage = CredentialsProto.parseDelimitedFrom((DataInputStream)in); for (CredentialsKVProto kv : storage.getTokensList()) { addToken(new Text(kv.getAliasBytes().toByteArray()), - (Token) new Token(kv.getToken())); + ProtobufHelper.tokenFromProto(kv.getToken())); } for (CredentialsKVProto kv : storage.getSecretsList()) { addSecretKey(new Text(kv.getAliasBytes().toByteArray()), diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java index a63ad4fdbbb21..215f473b9fcc9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java @@ -72,7 +72,7 @@ import org.apache.hadoop.util.ProtoUtil; import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.ByteString; +import org.apache.hadoop.thirdparty.protobuf.ByteString; import com.google.re2j.Pattern; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 91c51f2a32f39..8c84a8d31a063 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -189,21 +189,15 @@ private T getCanonicalUser(Class cls) { @Override public boolean commit() throws LoginException { - if (LOG.isDebugEnabled()) { - LOG.debug("hadoop login commit"); - } + LOG.debug("hadoop login commit"); // if we already have a user, we are done. 
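A short note on the logging pattern these UserGroupInformation hunks converge on: SLF4J's {} placeholders defer message formatting until the logger has confirmed the level is enabled, so the explicit isDebugEnabled() guard is only worth keeping where computing an argument is itself expensive. A minimal illustration, with a hypothetical helper:

    LOG.debug("Using existing subject: {}", subject.getPrincipals()); // formatting deferred
    if (LOG.isDebugEnabled()) {
      // guard retained only because building the argument is costly
      LOG.debug("Login state: {}", expensiveDiagnosticSummary());     // hypothetical helper
    }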
if (!subject.getPrincipals(User.class).isEmpty()) { - if (LOG.isDebugEnabled()) { - LOG.debug("using existing subject:"+subject.getPrincipals()); - } + LOG.debug("Using existing subject: {}", subject.getPrincipals()); return true; } Principal user = getCanonicalUser(KerberosPrincipal.class); if (user != null) { - if (LOG.isDebugEnabled()) { - LOG.debug("using kerberos user:"+user); - } + LOG.debug("Using kerberos user: {}", user); } //If we don't have a kerberos user and security is disabled, check //if user is specified in the environment or properties @@ -217,15 +211,11 @@ public boolean commit() throws LoginException { // use the OS user if (user == null) { user = getCanonicalUser(OS_PRINCIPAL_CLASS); - if (LOG.isDebugEnabled()) { - LOG.debug("using local user:"+user); - } + LOG.debug("Using local user: {}", user); } // if we found the user, add our principal if (user != null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Using user: \"" + user + "\" with name " + user.getName()); - } + LOG.debug("Using user: \"{}\" with name: {}", user, user.getName()); User userEntry = null; try { @@ -237,15 +227,12 @@ public boolean commit() throws LoginException { } catch (Exception e) { throw (LoginException)(new LoginException(e.toString()).initCause(e)); } - if (LOG.isDebugEnabled()) { - LOG.debug("User entry: \"" + userEntry.toString() + "\"" ); - } + LOG.debug("User entry: \"{}\"", userEntry); subject.getPrincipals().add(userEntry); return true; } - LOG.error("Can't find user in " + subject); - throw new LoginException("Can't find user name"); + throw new LoginException("Failed to find user in name " + subject); } @Override @@ -256,17 +243,13 @@ public void initialize(Subject subject, CallbackHandler callbackHandler, @Override public boolean login() throws LoginException { - if (LOG.isDebugEnabled()) { - LOG.debug("hadoop login"); - } + LOG.debug("Hadoop login"); return true; } @Override public boolean logout() throws LoginException { - if (LOG.isDebugEnabled()) { - LOG.debug("hadoop logout"); - } + LOG.debug("Hadoop logout"); return true; } } @@ -444,23 +427,12 @@ static Optional getKerberosLoginRenewalExecutor() { private static final boolean windows = System.getProperty("os.name").startsWith("Windows"); - private static final boolean is64Bit = - System.getProperty("os.arch").contains("64") || - System.getProperty("os.arch").contains("s390x"); - private static final boolean aix = System.getProperty("os.name").equals("AIX"); /* Return the OS login module class name */ + /* For IBM JDK, use the common OS login module class name for all platforms */ private static String getOSLoginModuleName() { if (IBM_JAVA) { - if (windows) { - return is64Bit ? "com.ibm.security.auth.module.Win64LoginModule" - : "com.ibm.security.auth.module.NTLoginModule"; - } else if (aix) { - return is64Bit ? "com.ibm.security.auth.module.AIX64LoginModule" - : "com.ibm.security.auth.module.AIXLoginModule"; - } else { - return "com.ibm.security.auth.module.LinuxLoginModule"; - } + return "com.ibm.security.auth.module.JAASLoginModule"; } else { return windows ? 
"com.sun.security.auth.module.NTLoginModule" : "com.sun.security.auth.module.UnixLoginModule"; @@ -468,23 +440,14 @@ private static String getOSLoginModuleName() { } /* Return the OS principal class */ + /* For IBM JDK, use the common OS principal class for all platforms */ @SuppressWarnings("unchecked") private static Class getOsPrincipalClass() { ClassLoader cl = ClassLoader.getSystemClassLoader(); try { String principalClass = null; if (IBM_JAVA) { - if (is64Bit) { - principalClass = "com.ibm.security.auth.UsernamePrincipal"; - } else { - if (windows) { - principalClass = "com.ibm.security.auth.NTUserPrincipal"; - } else if (aix) { - principalClass = "com.ibm.security.auth.AIXPrincipal"; - } else { - principalClass = "com.ibm.security.auth.LinuxPrincipal"; - } - } + principalClass = "com.ibm.security.auth.UsernamePrincipal"; } else { principalClass = windows ? "com.sun.security.auth.NTUserPrincipal" : "com.sun.security.auth.UnixPrincipal"; @@ -601,6 +564,7 @@ public boolean hasKerberosCredentials() { @InterfaceAudience.Public @InterfaceStability.Evolving public static UserGroupInformation getCurrentUser() throws IOException { + ensureInitialized(); AccessControlContext context = AccessController.getContext(); Subject subject = Subject.getSubject(context); if (subject == null || subject.getPrincipals(User.class).isEmpty()) { @@ -687,6 +651,7 @@ public static UserGroupInformation getUGIFromSubject(Subject subject) @InterfaceAudience.Public @InterfaceStability.Evolving public static UserGroupInformation getLoginUser() throws IOException { + ensureInitialized(); UserGroupInformation loginUser = loginUserRef.get(); // a potential race condition exists only for the initial creation of // the login user. there's no need to penalize all subsequent calls @@ -808,12 +773,10 @@ UserGroupInformation createLoginUser(Subject subject) throws IOException { LOG.debug("Loaded {} base64 tokens", numTokenBase64); } } catch (IOException ioe) { - LOG.debug("failure to load login credentials", ioe); + LOG.debug("Failure to load login credentials", ioe); throw ioe; } - if (LOG.isDebugEnabled()) { - LOG.debug("UGI loginUser:"+loginUser); - } + LOG.debug("UGI loginUser: {}", loginUser); return loginUser; } @@ -994,10 +957,7 @@ public void run() { do { try { long now = Time.now(); - if (LOG.isDebugEnabled()) { - LOG.debug("Current time is " + now); - LOG.debug("Next refresh is " + nextRefresh); - } + LOG.debug("Current time is {}, next refresh is {}", now, nextRefresh); if (now < nextRefresh) { Thread.sleep(nextRefresh - now); } @@ -1033,18 +993,18 @@ public void run() { try { tgtEndTime = tgt.getEndTime().getTime(); } catch (NullPointerException npe) { - LOG.error(String.format("NPE thrown while getting " + - "KerberosTicket endTime. Aborting renew thread for %s.", - getUserName()), ie); + LOG.error("NPE thrown while getting KerberosTicket endTime. " + + "Aborting renew thread for {}.", getUserName(), ie); return; } - LOG.warn(String.format("Exception encountered while running the " + - "renewal command for %s. " + - "(TGT end time:%d, renewalFailures: %d, " + - "renewalFailuresTotal: %d)", getUserName(), tgtEndTime, - metrics.renewalFailures.value(), - metrics.renewalFailuresTotal.value()), ie); + LOG.warn( + "Exception encountered while running the " + + "renewal command for {}. 
" + + "(TGT end time:{}, renewalFailures: {}, " + + "renewalFailuresTotal: {})", + getUserName(), tgtEndTime, metrics.renewalFailures.value(), + metrics.renewalFailuresTotal.value(), ie); if (rp == null) { // Use a dummy maxRetries to create the policy. The policy will // only be used to get next retry time with exponential back-off. @@ -1091,9 +1051,7 @@ final class TicketCacheRenewalRunnable @Override public void relogin() throws IOException { String output = Shell.execCommand(kinitCmd, "-R"); - if (LOG.isDebugEnabled()) { - LOG.debug("Renewed ticket. kinit output: {}", output); - } + LOG.debug("Renewed ticket. kinit output: {}", output); reloginFromTicketCache(); } } @@ -1193,9 +1151,7 @@ public void logoutUserFromKeytab() throws IOException { } try { - if (LOG.isDebugEnabled()) { - LOG.debug("Initiating logout for " + getUserName()); - } + LOG.debug("Initiating logout for {}", getUserName()); // hadoop login context internally locks credentials. login.logout(); } catch (LoginException le) { @@ -1329,9 +1285,7 @@ private void unprotectedRelogin(HadoopLoginContext login) throws IOException { // register most recent relogin attempt user.setLastLogin(now); try { - if (LOG.isDebugEnabled()) { - LOG.debug("Initiating logout for " + getUserName()); - } + LOG.debug("Initiating logout for {}", getUserName()); //clear up the kerberos state. But the tokens are not cleared! As per //the Java kerberos login module code, only the kerberos credentials //are cleared @@ -1340,9 +1294,7 @@ private void unprotectedRelogin(HadoopLoginContext login) throws IOException { //have the new credentials (pass it to the LoginContext constructor) login = newLoginContext( login.getAppName(), login.getSubject(), login.getConfiguration()); - if (LOG.isDebugEnabled()) { - LOG.debug("Initiating re-login for " + getUserName()); - } + LOG.debug("Initiating re-login for {}", getUserName()); login.login(); // this should be unnecessary. originally added due to improper locking // of the subject during relogin. @@ -1748,11 +1700,7 @@ public List getGroups() { try { return groups.getGroups(getShortUserName()); } catch (IOException ie) { - if (LOG.isDebugEnabled()) { - LOG.debug("Failed to get groups for user " + getShortUserName() - + " by " + ie); - LOG.trace("TRACE", ie); - } + LOG.debug("Failed to get groups for user {}", getShortUserName(), ie); return Collections.emptyList(); } } @@ -1867,7 +1815,10 @@ protected Subject getSubject() { @InterfaceAudience.Public @InterfaceStability.Evolving public T doAs(PrivilegedAction action) { - logPrivilegedAction(subject, action); + if (LOG.isDebugEnabled()) { + LOG.debug("PrivilegedAction [as: {}][action: {}]", this, action, + new Exception()); + } return Subject.doAs(subject, action); } @@ -1887,13 +1838,14 @@ public T doAs(PrivilegedAction action) { public T doAs(PrivilegedExceptionAction action ) throws IOException, InterruptedException { try { - logPrivilegedAction(subject, action); + if (LOG.isDebugEnabled()) { + LOG.debug("PrivilegedAction [as: {}][action: {}]", this, action, + new Exception()); + } return Subject.doAs(subject, action); } catch (PrivilegedActionException pae) { Throwable cause = pae.getCause(); - if (LOG.isDebugEnabled()) { - LOG.debug("PrivilegedActionException as:" + this + " cause:" + cause); - } + LOG.debug("PrivilegedActionException as: {}", this, cause); if (cause == null) { throw new RuntimeException("PrivilegedActionException with no " + "underlying cause. 
UGI [" + this + "]" +": " + pae, pae); @@ -1911,14 +1863,6 @@ public T doAs(PrivilegedExceptionAction action } } - private void logPrivilegedAction(Subject subject, Object action) { - if (LOG.isDebugEnabled()) { - // would be nice if action included a descriptive toString() - String where = new Throwable().getStackTrace()[2].toString(); - LOG.debug("PrivilegedAction as:"+this+" from:"+where); - } - } - /** * Log current UGI and token information into specified log. * @param ugi - UGI diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java index e69f3ad6f87b3..6e7a856aecc39 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java @@ -29,8 +29,8 @@ import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto; import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolPB; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements ProtocolMetaInterface, RefreshAuthorizationPolicyProtocol, Closeable { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java index 051ffcf103a11..cd5a51885a480 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java @@ -24,8 +24,8 @@ import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto; import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclResponseProto; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements RefreshAuthorizationPolicyProtocolPB { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java index dbc333d8b121a..ac4003881ba5c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java @@ -29,8 +29,8 @@ import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationRequestProto; import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class RefreshUserMappingsProtocolClientSideTranslatorPB implements ProtocolMetaInterface, RefreshUserMappingsProtocol, Closeable { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java index f9dd649f9fd32..71881ed4898c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java @@ -26,8 +26,8 @@ import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto; import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsResponseProto; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class RefreshUserMappingsProtocolServerSideTranslatorPB implements RefreshUserMappingsProtocolPB { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java index ad97a99c6ddc2..ff650d6c392db 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.net.InetAddress; import java.net.Socket; -import java.net.SocketException; import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; @@ -31,11 +30,9 @@ import javax.net.ssl.SSLSocket; import javax.net.ssl.SSLSocketFactory; +import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.wildfly.openssl.OpenSSLProvider; -import org.wildfly.openssl.SSL; - /** * A {@link SSLSocketFactory} that can delegate to various SSL implementations. @@ -58,6 +55,10 @@ * SSL with no modification to the list of enabled ciphers. * *

+ * + * In order to load OpenSSL, applications must ensure the wildfly-openssl + * artifact is on the classpath. Currently, only ABFS declares + * wildfly-openssl as an explicit dependency. */ public final class DelegatingSSLSocketFactory extends SSLSocketFactory { @@ -106,7 +107,16 @@ public static synchronized void initializeDefaultFactory( } /** - * Singletone instance of the SSLSocketFactory. + * For testing only: reset the socket factory. + */ + @VisibleForTesting + public static synchronized void resetDefaultFactory() { + LOG.info("Resetting default SSL Socket Factory"); + instance = null; + } + + /** + * Singleton instance of the SSLSocketFactory. * * SSLSocketFactory must be initialized with appropriate SSLChannelMode * using initializeDefaultFactory method. @@ -122,9 +132,7 @@ private DelegatingSSLSocketFactory(SSLChannelMode preferredChannelMode) throws IOException { try { initializeSSLContext(preferredChannelMode); - } catch (NoSuchAlgorithmException e) { - throw new IOException(e); - } catch (KeyManagementException e) { + } catch (NoSuchAlgorithmException | KeyManagementException e) { throw new IOException(e); } @@ -142,36 +150,23 @@ private DelegatingSSLSocketFactory(SSLChannelMode preferredChannelMode) } private void initializeSSLContext(SSLChannelMode preferredChannelMode) - throws NoSuchAlgorithmException, KeyManagementException { + throws NoSuchAlgorithmException, KeyManagementException, IOException { + LOG.debug("Initializing SSL Context to channel mode {}", + preferredChannelMode); switch (preferredChannelMode) { case Default: - if (!openSSLProviderRegistered) { - OpenSSLProvider.register(); - openSSLProviderRegistered = true; - } try { - java.util.logging.Logger logger = java.util.logging.Logger.getLogger( - SSL.class.getName()); - logger.setLevel(Level.WARNING); - ctx = SSLContext.getInstance("openssl.TLS"); - ctx.init(null, null, null); - // Strong reference needs to be kept to logger until initialization of - // SSLContext finished (see HADOOP-16174): - logger.setLevel(Level.INFO); + bindToOpenSSLProvider(); channelMode = SSLChannelMode.OpenSSL; - } catch (NoSuchAlgorithmException e) { - LOG.debug("Failed to load OpenSSL. Falling back to the JSSE default."); + } catch (LinkageError | NoSuchAlgorithmException | RuntimeException e) { + LOG.debug("Failed to load OpenSSL. Falling back to the JSSE default.", + e); ctx = SSLContext.getDefault(); channelMode = SSLChannelMode.Default_JSSE; } break; case OpenSSL: - if (!openSSLProviderRegistered) { - OpenSSLProvider.register(); - openSSLProviderRegistered = true; - } - ctx = SSLContext.getInstance("openssl.TLS"); - ctx.init(null, null, null); + bindToOpenSSLProvider(); channelMode = SSLChannelMode.OpenSSL; break; case Default_JSSE: @@ -183,11 +178,38 @@ private void initializeSSLContext(SSLChannelMode preferredChannelMode) channelMode = SSLChannelMode.Default_JSSE_with_GCM; break; default: - throw new NoSuchAlgorithmException("Unknown channel mode: " + throw new IOException("Unknown channel mode: " + preferredChannelMode); } } + /** + * Bind to the OpenSSL provider via wildfly. + * This MUST be the only place where wildfly classes are referenced, + * so ensuring that any linkage problems only surface here where they may + * be caught by the initialization code. 
+ */ + private void bindToOpenSSLProvider() + throws NoSuchAlgorithmException, KeyManagementException { + if (!openSSLProviderRegistered) { + LOG.debug("Attempting to register OpenSSL provider"); + org.wildfly.openssl.OpenSSLProvider.register(); + openSSLProviderRegistered = true; + } + // Strong reference needs to be kept to logger until initialization of + // SSLContext finished (see HADOOP-16174): + java.util.logging.Logger logger = java.util.logging.Logger.getLogger( + "org.wildfly.openssl.SSL"); + Level originalLevel = logger.getLevel(); + try { + logger.setLevel(Level.WARNING); + ctx = SSLContext.getInstance("openssl.TLS"); + ctx.init(null, null, null); + } finally { + logger.setLevel(originalLevel); + } + } + public String getProviderName() { return providerName; } @@ -202,21 +224,26 @@ public String[] getSupportedCipherSuites() { return ciphers.clone(); } + /** + * Get the channel mode of this instance. + * @return a channel mode. + */ + public SSLChannelMode getChannelMode() { + return channelMode; + } + public Socket createSocket() throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory.createSocket(); - configureSocket(ss); - return ss; + return configureSocket(factory.createSocket()); } @Override public Socket createSocket(Socket s, String host, int port, boolean autoClose) throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory.createSocket(s, host, port, autoClose); - configureSocket(ss); - return ss; + return configureSocket( + factory.createSocket(s, host, port, autoClose)); } @Override @@ -224,52 +251,41 @@ public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort) throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory - .createSocket(address, port, localAddress, localPort); - - configureSocket(ss); - return ss; + return configureSocket(factory + .createSocket(address, port, localAddress, localPort)); } @Override public Socket createSocket(String host, int port, InetAddress localHost, int localPort) throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory - .createSocket(host, port, localHost, localPort); - configureSocket(ss); - - return ss; + return configureSocket(factory + .createSocket(host, port, localHost, localPort)); } @Override public Socket createSocket(InetAddress host, int port) throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory.createSocket(host, port); - - configureSocket(ss); - return ss; + return configureSocket(factory.createSocket(host, port)); } @Override public Socket createSocket(String host, int port) throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory.createSocket(host, port); - - configureSocket(ss); - return ss; + return configureSocket(factory.createSocket(host, port)); } - private void configureSocket(SSLSocket ss) throws SocketException { - ss.setEnabledCipherSuites(ciphers); + private Socket configureSocket(Socket socket) { + ((SSLSocket) socket).setEnabledCipherSuites(ciphers); + return socket; } private String[] alterCipherList(String[] defaultCiphers) { - ArrayList preferredSuits = new ArrayList<>(); + ArrayList preferredSuites = new ArrayList<>(); // Remove GCM mode based ciphers from the supported list. 
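For orientation, a hedged usage sketch of the reworked factory (getDefaultFactory() is assumed to be the usual accessor; the remaining names appear in these hunks): initialize once in Default mode, then inspect which mode was actually bound, since the OpenSSL path now falls back to plain JSSE when wildfly-openssl is absent from the classpath.

    DelegatingSSLSocketFactory.initializeDefaultFactory(
        DelegatingSSLSocketFactory.SSLChannelMode.Default);
    DelegatingSSLSocketFactory factory =
        DelegatingSSLSocketFactory.getDefaultFactory();      // assumed accessor
    LOG.info("TLS channel mode in use: {}", factory.getChannelMode());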
for (int i = 0; i < defaultCiphers.length; i++) { @@ -277,11 +293,11 @@ private String[] alterCipherList(String[] defaultCiphers) { LOG.debug("Removed Cipher - {} from list of enabled SSLSocket ciphers", defaultCiphers[i]); } else { - preferredSuits.add(defaultCiphers[i]); + preferredSuites.add(defaultCiphers[i]); } } - ciphers = preferredSuits.toArray(new String[0]); + ciphers = preferredSuites.toArray(new String[0]); return ciphers; } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java index 487dd4625202e..4f0f6fc4d444a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java @@ -19,7 +19,6 @@ package org.apache.hadoop.security.token; import com.google.common.collect.Maps; -import com.google.protobuf.ByteString; import com.google.common.primitives.Bytes; import org.apache.commons.codec.binary.Base64; @@ -28,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.*; -import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -117,32 +115,6 @@ public Token copyToken() { return new Token(this); } - /** - * Construct a Token from a TokenProto. - * @param tokenPB the TokenProto object - */ - public Token(TokenProto tokenPB) { - this.identifier = tokenPB.getIdentifier().toByteArray(); - this.password = tokenPB.getPassword().toByteArray(); - this.kind = new Text(tokenPB.getKindBytes().toByteArray()); - this.service = new Text(tokenPB.getServiceBytes().toByteArray()); - } - - /** - * Construct a TokenProto from this Token instance. - * @return a new TokenProto object holding copies of data in this instance - */ - public TokenProto toTokenProto() { - return TokenProto.newBuilder(). - setIdentifier(ByteString.copyFrom(this.getIdentifier())). - setPassword(ByteString.copyFrom(this.getPassword())). - setKindBytes(ByteString.copyFrom( - this.getKind().getBytes(), 0, this.getKind().getLength())). - setServiceBytes(ByteString.copyFrom( - this.getService().getBytes(), 0, this.getService().getLength())). - build(); - } - /** * Get the token identifier's byte representation. 
* @return the token identifier's byte representation diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java index f61590c28ebce..cd3b8c0c0f279 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java @@ -98,12 +98,16 @@ public abstract class ZKDelegationTokenSecretManager versionedValue = sharedCount.getVersionedValue(); - if (sharedCount.trySetCount(versionedValue, versionedValue.getValue() + 1)) { - break; + if (sharedCount.trySetCount( + versionedValue, versionedValue.getValue() + batchSize)) { + return versionedValue.getValue(); } } } @Override protected int incrementDelegationTokenSeqNum() { - try { - incrSharedCount(delTokSeqCounter); - } catch (InterruptedException e) { - // The ExpirationThread is just finishing.. so dont do anything.. - LOG.debug("Thread interrupted while performing token counter increment", e); - Thread.currentThread().interrupt(); - } catch (Exception e) { - throw new RuntimeException("Could not increment shared counter !!", e); + // The secret manager will keep a local range of seq num which won't be + // seen by peers, so only when the range is exhausted it will ask zk for + // another range again + if (currentSeqNum >= currentMaxSeqNum) { + try { + // after a successful batch request, we can get the range starting point + currentSeqNum = incrSharedCount(delTokSeqCounter, seqNumBatchSize); + currentMaxSeqNum = currentSeqNum + seqNumBatchSize; + LOG.info("Fetched new range of seq num, from {} to {} ", + currentSeqNum+1, currentMaxSeqNum); + } catch (InterruptedException e) { + // The ExpirationThread is just finishing.. so dont do anything.. + LOG.debug( + "Thread interrupted while performing token counter increment", e); + Thread.currentThread().interrupt(); + } catch (Exception e) { + throw new RuntimeException("Could not increment shared counter !!", e); + } } - return delTokSeqCounter.getCount(); + + return ++currentSeqNum; } @Override @@ -603,7 +631,7 @@ protected int getCurrentKeyId() { @Override protected int incrementCurrentKeyId() { try { - incrSharedCount(keyIdSeqCounter); + incrSharedCount(keyIdSeqCounter, 1); } catch (InterruptedException e) { // The ExpirationThread is just finishing.. so dont do anything.. 
LOG.debug("Thread interrupted while performing keyId increment", e); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java index ed97f0526527e..b0c0fda542448 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java @@ -28,8 +28,8 @@ import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto; import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class GetUserMappingsProtocolClientSideTranslatorPB implements ProtocolMetaInterface, GetUserMappingsProtocol, Closeable { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java index 4f49418a093ba..a3a7af79b530b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java @@ -24,8 +24,8 @@ import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto; import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class GetUserMappingsProtocolServerSideTranslatorPB implements GetUserMappingsProtocolPB { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolServerSideTranslatorPB.java index f6a4ea33b9bb8..5b49e2e0d5ef0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolServerSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolServerSideTranslatorPB.java @@ -32,8 +32,8 @@ import org.apache.hadoop.tracing.TraceAdminPB.ConfigPair; import org.apache.hadoop.tracing.TraceAdminPB.RemoveSpanReceiverRequestProto; import org.apache.hadoop.tracing.TraceAdminPB.RemoveSpanReceiverResponseProto; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; @InterfaceAudience.Private public class TraceAdminProtocolServerSideTranslatorPB diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolTranslatorPB.java index edee2e2fa1367..a5cba39844c83 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolTranslatorPB.java @@ -33,7 +33,7 @@ import org.apache.hadoop.tracing.TraceAdminPB.RemoveSpanReceiverRequestProto; import org.apache.hadoop.tracing.TraceAdminPB.SpanReceiverListInfo; import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; @InterfaceAudience.Private public class TraceAdminProtocolTranslatorPB implements diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java index 595aeed87af17..2ee53dc595f99 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java @@ -296,7 +296,7 @@ private static void diskIoCheckWithoutNativeIo(File file) } file = null; } finally { - IOUtils.cleanup(null, fos); + IOUtils.cleanupWithLogger(LOG, fos); FileUtils.deleteQuietly(file); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java index 605d060270f8e..490562bf54ece 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DurationInfo.java @@ -18,6 +18,8 @@ package org.apache.hadoop.util; +import java.util.function.Supplier; + import org.slf4j.Logger; import org.apache.hadoop.classification.InterfaceAudience.Public; @@ -33,7 +35,10 @@ @Unstable public class DurationInfo extends OperationDuration implements AutoCloseable { - private final String text; + + private final Supplier text; + + private String textStr; private final Logger log; @@ -65,19 +70,25 @@ public DurationInfo(Logger log, boolean logAtInfo, String format, Object... args) { - this.text = String.format(format, args); + this.text = () -> String.format(format, args); this.log = log; this.logAtInfo = logAtInfo; if (logAtInfo) { - log.info("Starting: {}", text); + log.info("Starting: {}", getFormattedText()); } else { - log.debug("Starting: {}", text); + if (log.isDebugEnabled()) { + log.debug("Starting: {}", getFormattedText()); + } } } + private String getFormattedText() { + return (textStr == null) ? 
(textStr = text.get()) : textStr; + } + @Override public String toString() { - return text + ": duration " + super.toString(); + return getFormattedText() + ": duration " + super.toString(); } @Override @@ -86,7 +97,9 @@ public void close() { if (logAtInfo) { log.info("{}", this); } else { - log.debug("{}", this); + if (log.isDebugEnabled()) { + log.debug("{}", this); + } } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java index 0640fc01e2ffd..4247eb7050b5a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java @@ -23,6 +23,7 @@ import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; import java.util.List; +import java.util.concurrent.TimeUnit; /** * This class monitors the percentage of time the JVM is paused in GC within @@ -46,6 +47,52 @@ public class GcTimeMonitor extends Thread { private final GcData curData = new GcData(); private volatile boolean shouldRun = true; + public static class Builder { + + private long observationWindowMs = TimeUnit.MINUTES.toMillis(1); + private long sleepIntervalMs = TimeUnit.SECONDS.toMillis(5); + private int maxGcTimePercentage = 100; + private GcTimeAlertHandler handler = null; + + /** + * Set observation window size in milliseconds. + */ + public Builder observationWindowMs(long value) { + this.observationWindowMs = value; + return this; + } + + /** + * Set sleep interval in milliseconds. + */ + public Builder sleepIntervalMs(long value) { + this.sleepIntervalMs = value; + return this; + } + + /** + * Set the max GC time percentage that triggers the alert handler. + */ + public Builder maxGcTimePercentage(int value) { + this.maxGcTimePercentage = value; + return this; + } + + /** + * Set the GC alert handler. + */ + public Builder gcTimeAlertHandler(GcTimeAlertHandler value) { + this.handler = value; + return this; + } + + public GcTimeMonitor build() { + return new GcTimeMonitor(observationWindowMs, sleepIntervalMs, + maxGcTimePercentage, handler); + } + } + + /** * Create an instance of GCTimeMonitor. Once it's started, it will stay alive * and monitor GC time percentage until shutdown() is called. If you don't diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java index a53096a9577d2..0aba34845a676 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java @@ -25,7 +25,6 @@ import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.slf4j.impl.Log4jLoggerAdapter; /** * Contains utility methods for dealing with Java Generics. 
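A minimal usage sketch for the GcTimeMonitor.Builder added above (not part of the patch; the numeric values are illustrative assumptions, while start()/shutdown() follow from the existing class, which extends Thread):

```java
import org.apache.hadoop.util.GcTimeMonitor;

public class GcTimeMonitorSketch {
  public static void main(String[] args) throws InterruptedException {
    GcTimeMonitor gcMonitor = new GcTimeMonitor.Builder()
        .observationWindowMs(30_000)   // judge GC load over the last 30 seconds (example value)
        .sleepIntervalMs(1_000)        // sample once per second (example value)
        .maxGcTimePercentage(25)       // threshold for the optional alert handler (example value)
        .build();                      // gcTimeAlertHandler(...) may also be set; it defaults to null
    gcMonitor.start();                 // GcTimeMonitor extends Thread
    Thread.sleep(10_000);              // observe for a while
    gcMonitor.shutdown();              // stops monitoring, as described in the class javadoc
  }
}
```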
@@ -85,6 +84,11 @@ public static boolean isLog4jLogger(Class clazz) { return false; } Logger log = LoggerFactory.getLogger(clazz); - return log instanceof Log4jLoggerAdapter; + try { + Class log4jClass = Class.forName("org.slf4j.impl.Log4jLoggerAdapter"); + return log4jClass.isInstance(log); + } catch (ClassNotFoundException e) { + return false; + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java index 6f41b8c750dc9..5141740a3d23e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java @@ -52,6 +52,8 @@ public class HostsFileReader { .class); private final AtomicReference current; + private final AtomicReference lazyLoaded = + new AtomicReference<>(); public HostsFileReader(String inFile, String exFile) throws IOException { @@ -187,7 +189,18 @@ static String readFirstTagValue(Element e, String tag) { public void refresh(String includesFile, String excludesFile) throws IOException { - LOG.info("Refreshing hosts (include/exclude) list"); + refreshInternal(includesFile, excludesFile, false); + } + + public void lazyRefresh(String includesFile, String excludesFile) + throws IOException { + refreshInternal(includesFile, excludesFile, true); + } + + private void refreshInternal(String includesFile, String excludesFile, + boolean lazy) throws IOException { + LOG.info("Refreshing hosts (include/exclude) list (lazy refresh = {})", + lazy); HostDetails oldDetails = current.get(); Set newIncludes = oldDetails.includes; Map newExcludes = oldDetails.excludes; @@ -203,7 +216,21 @@ public void refresh(String includesFile, String excludesFile) } HostDetails newDetails = new HostDetails(includesFile, newIncludes, excludesFile, newExcludes); - current.set(newDetails); + + if (lazy) { + lazyLoaded.set(newDetails); + } else { + current.set(newDetails); + } + } + + public void finishRefresh() { + if (lazyLoaded.get() == null) { + throw new IllegalStateException( + "Cannot finish refresh - call lazyRefresh() first"); + } + current.set(lazyLoaded.get()); + lazyLoaded.set(null); } @Private @@ -279,6 +306,10 @@ public HostDetails getHostDetails() { return current.get(); } + public HostDetails getLazyLoadedHostDetails() { + return lazyLoaded.get(); + } + public void setIncludesFile(String includesFile) { LOG.info("Setting the includes file to " + includesFile); HostDetails oldDetails = current.get(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java index 2c1f591a1f2a9..cc0ebdf8b3e39 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java @@ -55,8 +55,10 @@ public class InstrumentedLock implements Lock { // Tracking counters for lock statistics. 
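A sketch of the two-phase reload added to HostsFileReader above (not part of the patch; the file paths are hypothetical):

```java
import org.apache.hadoop.util.HostsFileReader;

public class LazyRefreshSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical include/exclude file locations.
    String includes = "/etc/hadoop/dfs.hosts";
    String excludes = "/etc/hadoop/dfs.hosts.exclude";

    HostsFileReader reader = new HostsFileReader(includes, excludes);

    // Phase 1: parse the files without publishing the result; readers still
    // see the previous lists via getHostDetails().
    reader.lazyRefresh(includes, excludes);

    // The pending result is available from getLazyLoadedHostDetails() and can
    // be validated here before it takes effect.

    // Phase 2: atomically swap the pending lists into view.
    reader.finishRefresh();
  }
}
```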
private volatile long lockAcquireTimestamp; - private final AtomicLong lastLogTimestamp; - private final AtomicLong warningsSuppressed = new AtomicLong(0); + private final AtomicLong lastHoldLogTimestamp; + private final AtomicLong lastWaitLogTimestamp; + private final SuppressedStats holdStats = new SuppressedStats(); + private final SuppressedStats waitStats = new SuppressedStats(); /** * Create a instrumented lock instance which logs a warning message @@ -91,19 +93,24 @@ public InstrumentedLock(String name, Logger logger, Lock lock, this.logger = logger; minLoggingGap = minLoggingGapMs; lockWarningThreshold = lockWarningThresholdMs; - lastLogTimestamp = new AtomicLong( + lastHoldLogTimestamp = new AtomicLong( clock.monotonicNow() - Math.max(minLoggingGap, lockWarningThreshold)); + lastWaitLogTimestamp = new AtomicLong(lastHoldLogTimestamp.get()); } @Override public void lock() { + long waitStart = clock.monotonicNow(); lock.lock(); + check(waitStart, clock.monotonicNow(), false); startLockTiming(); } @Override public void lockInterruptibly() throws InterruptedException { + long waitStart = clock.monotonicNow(); lock.lockInterruptibly(); + check(waitStart, clock.monotonicNow(), false); startLockTiming(); } @@ -118,11 +125,14 @@ public boolean tryLock() { @Override public boolean tryLock(long time, TimeUnit unit) throws InterruptedException { + long waitStart = clock.monotonicNow(); + boolean retval = false; if (lock.tryLock(time, unit)) { startLockTiming(); - return true; + retval = true; } - return false; + check(waitStart, clock.monotonicNow(), false); + return retval; } @Override @@ -130,7 +140,7 @@ public void unlock() { long localLockReleaseTime = clock.monotonicNow(); long localLockAcquireTime = lockAcquireTimestamp; lock.unlock(); - check(localLockAcquireTime, localLockReleaseTime); + check(localLockAcquireTime, localLockReleaseTime, true); } @Override @@ -139,12 +149,25 @@ public Condition newCondition() { } @VisibleForTesting - void logWarning(long lockHeldTime, long suppressed) { + void logWarning(long lockHeldTime, SuppressedSnapshot stats) { logger.warn(String.format("Lock held time above threshold: " + "lock identifier: %s " + "lockHeldTimeMs=%d ms. Suppressed %d lock warnings. " + + "Longest suppressed LockHeldTimeMs=%d. " + "The stack trace is: %s" , - name, lockHeldTime, suppressed, + name, lockHeldTime, stats.getSuppressedCount(), + stats.getMaxSuppressedWait(), + StringUtils.getStackTrace(Thread.currentThread()))); + } + + @VisibleForTesting + void logWaitWarning(long lockWaitTime, SuppressedSnapshot stats) { + logger.warn(String.format("Waited above threshold to acquire lock: " + + "lock identifier: %s " + + "waitTimeMs=%d ms. Suppressed %d lock wait warnings. " + + "Longest suppressed WaitTimeMs=%d. " + + "The stack trace is: %s", name, lockWaitTime, + stats.getSuppressedCount(), stats.getMaxSuppressedWait(), StringUtils.getStackTrace(Thread.currentThread()))); } @@ -163,27 +186,41 @@ protected void startLockTiming() { * @param acquireTime - timestamp just after acquiring the lock. * @param releaseTime - timestamp just before releasing the lock. 
*/ - protected void check(long acquireTime, long releaseTime) { + protected void check(long acquireTime, long releaseTime, + boolean checkLockHeld) { if (!logger.isWarnEnabled()) { return; } final long lockHeldTime = releaseTime - acquireTime; if (lockWarningThreshold - lockHeldTime < 0) { + AtomicLong lastLogTime; + SuppressedStats stats; + if (checkLockHeld) { + lastLogTime = lastHoldLogTimestamp; + stats = holdStats; + } else { + lastLogTime = lastWaitLogTimestamp; + stats = waitStats; + } long now; long localLastLogTs; do { now = clock.monotonicNow(); - localLastLogTs = lastLogTimestamp.get(); + localLastLogTs = lastLogTime.get(); long deltaSinceLastLog = now - localLastLogTs; // check should print log or not if (deltaSinceLastLog - minLoggingGap < 0) { - warningsSuppressed.incrementAndGet(); + stats.incrementSuppressed(lockHeldTime); return; } - } while (!lastLogTimestamp.compareAndSet(localLastLogTs, now)); - long suppressed = warningsSuppressed.getAndSet(0); - logWarning(lockHeldTime, suppressed); + } while (!lastLogTime.compareAndSet(localLastLogTs, now)); + SuppressedSnapshot statsSnapshot = stats.snapshot(); + if (checkLockHeld) { + logWarning(lockHeldTime, statsSnapshot); + } else { + logWaitWarning(lockHeldTime, statsSnapshot); + } } } @@ -194,4 +231,60 @@ protected Lock getLock() { protected Timer getTimer() { return clock; } + + /** + * Internal class to track statistics about suppressed log messages in an + * atomic way. + */ + private static class SuppressedStats { + private long suppressedCount = 0; + private long maxSuppressedWait = 0; + + /** + * Increments the suppressed counter and increases the max wait time if the + * passed wait is greater than the current maxSuppressedWait. + * @param wait The wait time for this suppressed message + */ + synchronized public void incrementSuppressed(long wait) { + suppressedCount++; + if (wait > maxSuppressedWait) { + maxSuppressedWait = wait; + } + } + + /** + * Captures the current value of the counts into a SuppressedSnapshot object + * and resets the values to zero. + * + * @return SuppressedSnapshot containing the current value of the counters + */ + synchronized public SuppressedSnapshot snapshot() { + SuppressedSnapshot snap = + new SuppressedSnapshot(suppressedCount, maxSuppressedWait); + suppressedCount = 0; + maxSuppressedWait = 0; + return snap; + } + } + + /** + * Immutable class to capture a snapshot of suppressed log message stats. 
+ */ + protected static class SuppressedSnapshot { + private long suppressedCount = 0; + private long maxSuppressedWait = 0; + + public SuppressedSnapshot(long suppressedCount, long maxWait) { + this.suppressedCount = suppressedCount; + this.maxSuppressedWait = maxWait; + } + + public long getMaxSuppressedWait() { + return maxSuppressedWait; + } + + public long getSuppressedCount() { + return suppressedCount; + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java index e1157184781d9..8ab392ed041d0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java @@ -75,7 +75,7 @@ public void unlock() { getLock().unlock(); if (needReport) { readLockHeldTimeStamp.remove(); - check(localLockAcquireTime, localLockReleaseTime); + check(localLockAcquireTime, localLockReleaseTime, true); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java index a41052407697c..758f1ff87cff7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java @@ -37,7 +37,7 @@ public class InstrumentedReadWriteLock implements ReadWriteLock { private final Lock readLock; private final Lock writeLock; - InstrumentedReadWriteLock(boolean fair, String name, Logger logger, + public InstrumentedReadWriteLock(boolean fair, String name, Logger logger, long minLoggingGapMs, long lockWarningThresholdMs) { ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(fair); readLock = new InstrumentedReadLock(name, logger, readWriteLock, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java deleted file mode 100644 index f2a5b242a8d37..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java +++ /dev/null @@ -1,339 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.util; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Timer; -import java.util.TimerTask; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.util.Shell.ExitCodeException; -import org.apache.hadoop.util.Shell.ShellCommandExecutor; -import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * - * The class which provides functionality of checking the health of the node - * using the configured node health script and reporting back to the service - * for which the health checker has been asked to report. - */ -public class NodeHealthScriptRunner extends AbstractService { - - private static final Logger LOG = - LoggerFactory.getLogger(NodeHealthScriptRunner.class); - - /** Absolute path to the health script. */ - private String nodeHealthScript; - /** Delay after which node health script to be executed */ - private long intervalTime; - /** Time after which the script should be timedout */ - private long scriptTimeout; - /** Timer used to schedule node health monitoring script execution */ - private Timer nodeHealthScriptScheduler; - - /** ShellCommandExecutor used to execute monitoring script */ - ShellCommandExecutor shexec = null; - - /** Pattern used for searching in the output of the node health script */ - static private final String ERROR_PATTERN = "ERROR"; - - /** Time out error message */ - public static final String NODE_HEALTH_SCRIPT_TIMED_OUT_MSG = "Node health script timed out"; - - private boolean isHealthy; - - private String healthReport; - - private long lastReportedTime; - - private TimerTask timer; - - private enum HealthCheckerExitStatus { - SUCCESS, - TIMED_OUT, - FAILED_WITH_EXIT_CODE, - FAILED_WITH_EXCEPTION, - FAILED - } - - - /** - * Class which is used by the {@link Timer} class to periodically execute the - * node health script. - * - */ - private class NodeHealthMonitorExecutor extends TimerTask { - - String exceptionStackTrace = ""; - - public NodeHealthMonitorExecutor(String[] args) { - ArrayList execScript = new ArrayList(); - execScript.add(nodeHealthScript); - if (args != null) { - execScript.addAll(Arrays.asList(args)); - } - shexec = new ShellCommandExecutor(execScript - .toArray(new String[execScript.size()]), null, null, scriptTimeout); - } - - @Override - public void run() { - HealthCheckerExitStatus status = HealthCheckerExitStatus.SUCCESS; - try { - shexec.execute(); - } catch (ExitCodeException e) { - // ignore the exit code of the script - status = HealthCheckerExitStatus.FAILED_WITH_EXIT_CODE; - // On Windows, we will not hit the Stream closed IOException - // thrown by stdout buffered reader for timeout event. 
- if (Shell.WINDOWS && shexec.isTimedOut()) { - status = HealthCheckerExitStatus.TIMED_OUT; - } - } catch (Exception e) { - LOG.warn("Caught exception : " + e.getMessage()); - if (!shexec.isTimedOut()) { - status = HealthCheckerExitStatus.FAILED_WITH_EXCEPTION; - } else { - status = HealthCheckerExitStatus.TIMED_OUT; - } - exceptionStackTrace = StringUtils.stringifyException(e); - } finally { - if (status == HealthCheckerExitStatus.SUCCESS) { - if (hasErrors(shexec.getOutput())) { - status = HealthCheckerExitStatus.FAILED; - } - } - reportHealthStatus(status); - } - } - - /** - * Method which is used to parse output from the node health monitor and - * send to the report address. - * - * The timed out script or script which causes IOException output is - * ignored. - * - * The node is marked unhealthy if - *
- * <ol>
- * <li>The node health script times out</li>
- * <li>The node health scripts output has a line which begins with ERROR</li>
- * <li>An exception is thrown while executing the script</li>
- * </ol>
- * If the script throws {@link IOException} or {@link ExitCodeException} the - * output is ignored and node is left remaining healthy, as script might - * have syntax error. - * - * @param status - */ - void reportHealthStatus(HealthCheckerExitStatus status) { - long now = System.currentTimeMillis(); - switch (status) { - case SUCCESS: - setHealthStatus(true, "", now); - break; - case TIMED_OUT: - setHealthStatus(false, NODE_HEALTH_SCRIPT_TIMED_OUT_MSG); - break; - case FAILED_WITH_EXCEPTION: - setHealthStatus(false, exceptionStackTrace); - break; - case FAILED_WITH_EXIT_CODE: - // see Javadoc above - we don't report bad health intentionally - setHealthStatus(true, "", now); - break; - case FAILED: - setHealthStatus(false, shexec.getOutput()); - break; - } - } - - /** - * Method to check if the output string has line which begins with ERROR. - * - * @param output - * string - * @return true if output string has error pattern in it. - */ - private boolean hasErrors(String output) { - String[] splits = output.split("\n"); - for (String split : splits) { - if (split.startsWith(ERROR_PATTERN)) { - return true; - } - } - return false; - } - } - - public NodeHealthScriptRunner(String scriptName, long chkInterval, long timeout, - String[] scriptArgs) { - super(NodeHealthScriptRunner.class.getName()); - this.lastReportedTime = System.currentTimeMillis(); - this.isHealthy = true; - this.healthReport = ""; - this.nodeHealthScript = scriptName; - this.intervalTime = chkInterval; - this.scriptTimeout = timeout; - this.timer = new NodeHealthMonitorExecutor(scriptArgs); - } - - /* - * Method which initializes the values for the script path and interval time. - */ - @Override - protected void serviceInit(Configuration conf) throws Exception { - super.serviceInit(conf); - } - - /** - * Method used to start the Node health monitoring. - * - */ - @Override - protected void serviceStart() throws Exception { - nodeHealthScriptScheduler = new Timer("NodeHealthMonitor-Timer", true); - // Start the timer task immediately and - // then periodically at interval time. - nodeHealthScriptScheduler.scheduleAtFixedRate(timer, 0, intervalTime); - super.serviceStart(); - } - - /** - * Method used to terminate the node health monitoring service. - * - */ - @Override - protected void serviceStop() { - if (nodeHealthScriptScheduler != null) { - nodeHealthScriptScheduler.cancel(); - } - if (shexec != null) { - Process p = shexec.getProcess(); - if (p != null) { - p.destroy(); - } - } - } - - /** - * Gets the if the node is healthy or not - * - * @return true if node is healthy - */ - public boolean isHealthy() { - return isHealthy; - } - - /** - * Sets if the node is healthy or not considering disks' health also. - * - * @param isHealthy - * if or not node is healthy - */ - private synchronized void setHealthy(boolean isHealthy) { - this.isHealthy = isHealthy; - } - - /** - * Returns output from health script. if node is healthy then an empty string - * is returned. - * - * @return output from health script - */ - public String getHealthReport() { - return healthReport; - } - - /** - * Sets the health report from the node health script. Also set the disks' - * health info obtained from DiskHealthCheckerService. - * - * @param healthReport - */ - private synchronized void setHealthReport(String healthReport) { - this.healthReport = healthReport; - } - - /** - * Returns time stamp when node health script was last run. 
- * - * @return timestamp when node health script was last run - */ - public long getLastReportedTime() { - return lastReportedTime; - } - - /** - * Sets the last run time of the node health script. - * - * @param lastReportedTime - */ - private synchronized void setLastReportedTime(long lastReportedTime) { - this.lastReportedTime = lastReportedTime; - } - - /** - * Method used to determine if or not node health monitoring service should be - * started or not. Returns true if following conditions are met: - * - *
- * <ol>
- * <li>Path to Node health check script is not empty</li>
- * <li>Node health check script file exists</li>
- * </ol>
- * - * @return true if node health monitoring service can be started. - */ - public static boolean shouldRun(String healthScript) { - if (healthScript == null || healthScript.trim().isEmpty()) { - return false; - } - File f = new File(healthScript); - return f.exists() && FileUtil.canExecute(f); - } - - private synchronized void setHealthStatus(boolean isHealthy, String output) { - LOG.info("health status being set as " + output); - this.setHealthy(isHealthy); - this.setHealthReport(output); - } - - private synchronized void setHealthStatus(boolean isHealthy, String output, - long time) { - LOG.info("health status being set as " + output); - this.setHealthStatus(isHealthy, output); - this.setLastReportedTime(time); - } - - /** - * Used only by tests to access the timer task directly - * @return the timer task - */ - public TimerTask getTimerTask() { - return timer; - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java index 9a0b05c369b86..2bb19460b3686 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java @@ -32,7 +32,7 @@ import org.apache.htrace.core.Span; import org.apache.htrace.core.Tracer; -import com.google.protobuf.ByteString; +import org.apache.hadoop.thirdparty.protobuf.ByteString; public abstract class ProtoUtil { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java index ff7357a175b79..1ae71d187d3ba 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java @@ -31,6 +31,8 @@ import java.lang.reflect.Method; import java.nio.charset.Charset; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -350,7 +352,13 @@ public static void cloneWritableInto(Writable dst, public static List getDeclaredFieldsIncludingInherited(Class clazz) { List fields = new ArrayList(); while (clazz != null) { - for (Field field : clazz.getDeclaredFields()) { + Field[] sortedFields = clazz.getDeclaredFields(); + Arrays.sort(sortedFields, new Comparator() { + public int compare(Field a, Field b) { + return a.getName().compareTo(b.getName()); + } + }); + for (Field field : sortedFields) { fields.add(field); } clazz = clazz.getSuperclass(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java index 366e679e64ba5..685e92d628136 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java @@ -45,6 +45,19 @@ public void setCurrentValue(long value) { currentValue.set(value); } + public boolean setIfGreater(long value) { + while(true) { + long local = currentValue.get(); + if(value <= local) { + return false; // swap failed + } + if(currentValue.compareAndSet(local, value)) { + return true; // swap successful + } + // keep 
trying + } + } + /** Increment and then return the next value. */ public long nextValue() { return currentValue.incrementAndGet(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java index 2cda8a443e4bd..f9ea3fcacd381 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java @@ -49,6 +49,34 @@ public static void sleepAtLeastIgnoreInterrupts(long millis) { } } + /** + * Join a thread as uninterruptible. + * The call continues to block until the result is available even when the + * caller thread is interrupted. + * The method will log any {@link InterruptedException} then will re-interrupt + * the thread. + * + * @param toJoin the thread to Join on. + */ + public static void joinUninterruptibly(Thread toJoin) { + boolean interrupted = false; + try { + while (true) { + try { + toJoin.join(); + return; + } catch (InterruptedException e) { + interrupted = true; + LOG.warn("interrupted while sleeping", e); + } + } + } finally { + if (interrupted) { + Thread.currentThread().interrupt(); + } + } + } + /** * Convenience method that returns a resource as inputstream from the * classpath. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java index 7c09d93d115a5..0bbceb59c31e7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java @@ -115,18 +115,18 @@ public static void shutdown(ExecutorService executorService, Logger logger, try { executorService.shutdown(); - logger.info( + logger.debug( "Gracefully shutting down executor service. Waiting max {} {}", timeout, unit); if (!executorService.awaitTermination(timeout, unit)) { - logger.info( + logger.debug( "Executor service has not shutdown yet. Forcing. 
" + "Will wait up to an additional {} {} for shutdown", timeout, unit); executorService.shutdownNow(); } if (executorService.awaitTermination(timeout, unit)) { - logger.info("Succesfully shutdown executor service"); + logger.debug("Succesfully shutdown executor service"); } else { logger.error("Unable to shutdown executor service after timeout {} {}", (2 * timeout), unit); diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c index 26e1fa623e859..3f141be05b549 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c @@ -42,16 +42,18 @@ #ifdef UNIX static void * (*dlsym_CRYPTO_malloc) (int, const char *, int); static void (*dlsym_CRYPTO_free) (void *); +#if OPENSSL_VERSION_NUMBER < 0x10100000L static int (*dlsym_CRYPTO_num_locks) (void); static void (*dlsym_CRYPTO_set_locking_callback) (void (*)()); static void (*dlsym_CRYPTO_set_id_callback) (unsigned long (*)()); static void (*dlsym_ENGINE_load_rdrand) (void); +static void (*dlsym_ENGINE_cleanup) (void); +#endif static ENGINE * (*dlsym_ENGINE_by_id) (const char *); static int (*dlsym_ENGINE_init) (ENGINE *); static int (*dlsym_ENGINE_set_default) (ENGINE *, unsigned int); static int (*dlsym_ENGINE_finish) (ENGINE *); static int (*dlsym_ENGINE_free) (ENGINE *); -static void (*dlsym_ENGINE_cleanup) (void); static int (*dlsym_RAND_bytes) (unsigned char *, int); static unsigned long (*dlsym_ERR_get_error) (void); #endif @@ -113,6 +115,8 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_ dlerror(); // Clear any existing error LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_malloc, env, openssl, "CRYPTO_malloc"); LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_free, env, openssl, "CRYPTO_free"); +#if OPENSSL_VERSION_NUMBER < 0x10100000L + // pre-1.1.0 LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_num_locks, env, openssl, "CRYPTO_num_locks"); LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_set_locking_callback, \ env, openssl, "CRYPTO_set_locking_callback"); @@ -120,13 +124,14 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_ openssl, "CRYPTO_set_id_callback"); LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_load_rdrand, env, \ openssl, "ENGINE_load_rdrand"); + LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_cleanup, env, openssl, "ENGINE_cleanup"); +#endif LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_by_id, env, openssl, "ENGINE_by_id"); LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_init, env, openssl, "ENGINE_init"); LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_set_default, env, \ openssl, "ENGINE_set_default"); LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_finish, env, openssl, "ENGINE_finish"); LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_free, env, openssl, "ENGINE_free"); - LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_cleanup, env, openssl, "ENGINE_cleanup"); LOAD_DYNAMIC_SYMBOL(dlsym_RAND_bytes, env, openssl, "RAND_bytes"); LOAD_DYNAMIC_SYMBOL(dlsym_ERR_get_error, env, openssl, "ERR_get_error"); #endif @@ -303,9 +308,11 @@ static unsigned long pthreads_thread_id(void) */ static ENGINE * openssl_rand_init(void) { +#if OPENSSL_VERSION_NUMBER < 0x10100000L locks_setup(); dlsym_ENGINE_load_rdrand(); +#endif ENGINE *eng = dlsym_ENGINE_by_id("rdrand"); int ret = -1; @@ -340,11 +347,12 @@ static void openssl_rand_clean(ENGINE *eng, int clean_locks) dlsym_ENGINE_finish(eng); 
dlsym_ENGINE_free(eng); } - +#if OPENSSL_VERSION_NUMBER < 0x10100000L dlsym_ENGINE_cleanup(); if (clean_locks) { locks_cleanup(); } +#endif } static int openssl_rand_bytes(unsigned char *buf, int num) diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c index b0b5151cd1238..1d7c508d85c76 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c @@ -1486,11 +1486,11 @@ JNIEnv *env, jclass thisClass, jlong address, jlong length) { /* * Class: org_apache_hadoop_io_nativeio_NativeIO_POSIX - * Method: pmemCreateMapFile + * Method: pmemMapFile * Signature: (Ljava/lang/String;J)Lorg/apache/hadoop/io/nativeio/NativeIO/POSIX/PmemMappedRegion; */ -JNIEXPORT jobject JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_pmemCreateMapFile( -JNIEnv *env, jclass thisClass, jstring filePath, jlong fileLength) { +JNIEXPORT jobject JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_pmemMapFile( +JNIEnv *env, jclass thisClass, jstring filePath, jlong fileLength, jboolean isFileExist) { #if (defined UNIX) && (defined HADOOP_PMDK_LIBRARY) /* create a pmem file and memory map it */ const char * path = NULL; @@ -1505,17 +1505,20 @@ JNIEnv *env, jclass thisClass, jstring filePath, jlong fileLength) { return NULL; } - if (fileLength <= 0) { - (*env)->ReleaseStringUTFChars(env, filePath, path); - THROW(env, "java/lang/IllegalArgumentException", "File length should be positive"); - return NULL; + if (isFileExist) { + pmemaddr = pmdkLoader->pmem_map_file(path, 0, 0, 0666, &mapped_len, &is_pmem); + } else { + if (fileLength <= 0) { + (*env)->ReleaseStringUTFChars(env, filePath, path); + THROW(env, "java/lang/IllegalArgumentException", "File length should be positive"); + return NULL; + } + pmemaddr = pmdkLoader->pmem_map_file(path, fileLength, PMEM_FILE_CREATE|PMEM_FILE_EXCL, + 0666, &mapped_len, &is_pmem); } - pmemaddr = pmdkLoader->pmem_map_file(path, fileLength, PMEM_FILE_CREATE|PMEM_FILE_EXCL, - 0666, &mapped_len, &is_pmem); - if (!pmemaddr) { - snprintf(msg, sizeof(msg), "Failed to create pmem file. 
file: %s, length: %x, error msg: %s", path, fileLength, pmem_errormsg()); + snprintf(msg, sizeof(msg), "Failed to map file on persistent memory.file: %s, length: %x, error msg: %s", path, fileLength, pmem_errormsg()); THROW(env, "java/io/IOException", msg); (*env)->ReleaseStringUTFChars(env, filePath, path); return NULL; diff --git a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties index 9b74960cf72bc..6f8558b8d4fe9 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties +++ b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties @@ -23,4 +23,4 @@ user=${user.name} date=${version-info.build.time} url=${version-info.scm.uri} srcChecksum=${version-info.source.md5} -protocVersion=${protobuf.version} +protocVersion=${hadoop.protobuf.version} diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index f0aa44dc999ca..0d583cca57cd0 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -48,6 +48,14 @@ ordering of the filters.
+ + hadoop.http.idle_timeout.ms + 1000 + + NN/JN/DN Server connection timeout in milliseconds. + + + @@ -904,7 +912,7 @@ fs.protected.directories A comma-separated list of directories which cannot - be deleted even by the superuser unless they are empty. This + be deleted or renamed even by the superuser unless they are empty. This setting can be used to guard important system directories against accidental deletion due to administrator error. @@ -1090,8 +1098,8 @@ Uses the values of fs.s3a.access.key and fs.s3a.secret.key. * com.amazonaws.auth.EnvironmentVariableCredentialsProvider: supports configuration of AWS access key ID and secret access key in - environment variables named AWS_ACCESS_KEY_ID and - AWS_SECRET_ACCESS_KEY, as documented in the AWS SDK. + environment variables named AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, + and AWS_SESSION_TOKEN as documented in the AWS SDK. * org.apache.hadoop.fs.s3a.auth.IAMInstanceCredentialsProvider: picks up IAM credentials of any EC2 VM or AWS container in which the process is running. @@ -1553,7 +1561,6 @@ - fs.s3a.s3guard.cli.prune.age 86400000 @@ -1624,6 +1631,27 @@ + + fs.s3a.s3guard.ddb.table.sse.enabled + false + + Whether server-side encryption (SSE) is enabled or disabled on the table. + By default it's disabled, meaning SSE is set to AWS owned CMK. + + + + + fs.s3a.s3guard.ddb.table.sse.cmk + + + The KMS Customer Master Key (CMK) used for the KMS encryption on the table. + To specify a CMK, this config value can be its key ID, Amazon Resource Name + (ARN), alias name, or alias ARN. Users only need to provide this config if + the key is different from the default DynamoDB KMS Master Key, which is + alias/aws/dynamodb. + + + fs.s3a.s3guard.ddb.max.retries 9 @@ -1678,7 +1706,7 @@ fs.s3a.retry.throttle.limit - ${fs.s3a.attempts.maximum} + 20 Number of times to retry any throttled request. @@ -1686,9 +1714,12 @@ fs.s3a.retry.throttle.interval - 1000ms + 100ms - Interval between retry attempts on throttled requests. + Initial between retry attempts on throttled requests, +/- 50%. chosen at random. + i.e. for an intial value of 3000ms, the initial delay would be in the range 1500ms to 4500ms. + Backoffs are exponential; again randomness is used to avoid the thundering heard problem. + 500ms is the default value used by the AWS S3 Retry policy. @@ -1920,6 +1951,23 @@ + + fs.s3a.connection.request.timeout + 0 + + Time out on HTTP requests to the AWS service; 0 means no timeout. + Measured in seconds; the usual time suffixes are all supported + + Important: this is the maximum duration of any AWS service call, + including upload and copy operations. If non-zero, it must be larger + than the time to upload multi-megabyte blocks to S3 from the client, + and to rename many-GB files. Use with care. + + Values that are larger than Integer.MAX_VALUE milliseconds are + converged to Integer.MAX_VALUE milliseconds + + + fs.s3a.etag.checksum.enabled false @@ -1979,11 +2027,16 @@ If secure connections to S3 are enabled, configures the SSL implementation used to encrypt connections to S3. Supported values are: - "default_jsse" and "default_jsse_with_gcm". "default_jsse" uses the Java - Secure Socket Extension package (JSSE). However, when running on Java 8, - the GCM cipher is removed from the list of enabled ciphers. This is due - to performance issues with GCM in Java 8. "default_jsse_with_gcm" uses - the JSSE with the default list of cipher suites. + "default_jsse", "default_jsse_with_gcm", "default", and "openssl". 
+ "default_jsse" uses the Java Secure Socket Extension package (JSSE). + However, when running on Java 8, the GCM cipher is removed from the list + of enabled ciphers. This is due to performance issues with GCM in Java 8. + "default_jsse_with_gcm" uses the JSSE with the default list of cipher + suites. "default_jsse_with_gcm" is equivalent to the behavior prior to + this feature being introduced. "default" attempts to use OpenSSL rather + than the JSSE for SSL encryption, if OpenSSL libraries cannot be loaded, + it falls back to the "default_jsse" behavior. "openssl" attempts to use + OpenSSL as well, but fails if OpenSSL libraries cannot be loaded. @@ -2674,7 +2727,7 @@ ${user.home}/hadoop-http-auth-signature-secret The signature secret for signing the authentication tokens. - The same secret should be used for JT/NN/DN/TT configurations. + The same secret should be used for RM/NM/NN/DN configurations. @@ -2929,6 +2982,7 @@ The supported SSL protocols. The parameter will only be used from DatanodeHttpServer. + Starting from Hadoop 3.3.0, TLSv1.3 is supported with Java 11 Runtime. @@ -3018,6 +3072,14 @@ + + ha.failover-controller.active-standby-elector.zk.op.retries + 3 + + The number of zookeeper operation retry times in ActiveStandbyElector + + + ha.failover-controller.cli-check.rpc-timeout.ms 20000 @@ -3782,4 +3844,21 @@ fs space usage statistics refresh jitter in msec. + + + hadoop.http.sni.host.check.enabled + false + + Enable Server Name Indication (SNI) host check for HTTPS enabled server. + + + + + hadoop.metrics.jvm.use-thread-mxbean + false + + Whether or not ThreadMXBean is used for getting thread info in JvmMetrics, + ThreadGroup approach is preferred for better performance. + + diff --git a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties index cbbb88764d1f8..dc37949851cf5 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties +++ b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties @@ -19,7 +19,7 @@ # contains key properties for setting up the application classloader system.classes.default=java.,\ javax.accessibility.,\ - javax.activation.,\ + -javax.activation.,\ javax.activity.,\ javax.annotation.,\ javax.annotation.processing.,\ diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md index 7be6a192c9c57..7f61d3bd45592 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md @@ -27,7 +27,7 @@ This document does not cover advanced topics such as [Security](./SecureMode.htm Prerequisites ------------- -* Install Java. See the [Hadoop Wiki](http://wiki.apache.org/hadoop/HadoopJavaVersions) for known good versions. +* Install Java. See the [Hadoop Wiki](https://cwiki.apache.org/confluence/display/HADOOP/Hadoop+Java+Versions) for known good versions. * Download a stable version of Hadoop from Apache mirrors. 
Installation diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/DownstreamDev.md b/hadoop-common-project/hadoop-common/src/site/markdown/DownstreamDev.md index b47f83bbadf50..b04bc2488f8ae 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/DownstreamDev.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/DownstreamDev.md @@ -430,4 +430,3 @@ please contact the developer mailing list for the relevant component(s): * [hdfs-dev](mailto:hdfs-dev@hadoop.apache.org) * [mapreduce-dev](mailto:mapreduce-dev@hadoop.apache.org) * [yarn-dev](mailto:yarn-dev@hadoop.apache.org) -* [submarine-dev](mailto:submarine-dev@hadoop.apache.org) diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FairCallQueue.md b/hadoop-common-project/hadoop-common/src/site/markdown/FairCallQueue.md index 22ac05a53b951..887d3053d263e 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/FairCallQueue.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/FairCallQueue.md @@ -126,6 +126,7 @@ omitted. |:---- |:---- |:---- |:--- | | backoff.enable | General | Whether or not to enable client backoff when a queue is full. | false | | callqueue.impl | General | The fully qualified name of a class to use as the implementation of a call queue. Use `org.apache.hadoop.ipc.FairCallQueue` for the Fair Call Queue. | `java.util.concurrent.LinkedBlockingQueue` (FIFO queue) | +| callqueue.capacity.weights | General | The capacity allocation weights among all subqueues. A postive int array whose length is equal to the `scheduler.priority.levels` is expected where each int is the relative weight out of total capacity. i.e. if a queue with capacity weight `w`, its queue capacity is `capacity * w/sum(weights)` | | scheduler.impl | General | The fully qualified name of a class to use as the implementation of the scheduler. Use `org.apache.hadoop.ipc.DecayRpcScheduler` in conjunction with the Fair Call Queue. | `org.apache.hadoop.ipc.DefaultRpcScheduler` (no-op scheduler)
If using FairCallQueue, defaults to `org.apache.hadoop.ipc.DecayRpcScheduler` | | scheduler.priority.levels | RpcScheduler, CallQueue | How many priority levels to use within the scheduler and call queue. | 4 | | faircallqueue.multiplexer.weights | WeightedRoundRobinMultiplexer | How much weight to give to each priority queue. This should be a comma-separated list of length equal to the number of priority levels. | Weights descend by a factor of 2 (e.g., for 4 levels: `8,4,2,1`) | @@ -151,6 +152,10 @@ processed. ipc.8020.callqueue.impl org.apache.hadoop.ipc.FairCallQueue + + ipc.8020.callqueue.capacity.weights + 7,3 + ipc.8020.scheduler.impl org.apache.hadoop.ipc.DecayRpcScheduler diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md index 7df2cce574b68..9c5586913d900 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md @@ -122,30 +122,17 @@ Options copyFromLocal ------------- -Usage: `hadoop fs -copyFromLocal URI` - -Similar to the `fs -put` command, except that the source is restricted to a local file reference. - -Options: - -* `-p` : Preserves access and modification times, ownership and the permissions. -(assuming the permissions can be propagated across filesystems) -* `-f` : Overwrites the destination if it already exists. -* `-l` : Allow DataNode to lazily persist the file to disk, Forces a replication - factor of 1. This flag will result in reduced durability. Use with care. -* `-d` : Skip creation of temporary file with the suffix `._COPYING_`. +Identical to the -put command. copyToLocal ----------- -Usage: `hadoop fs -copyToLocal [-ignorecrc] [-crc] URI ` - -Similar to get command, except that the destination is restricted to a local file reference. +Identical to the -get command. count ----- -Usage: `hadoop fs -count [-q] [-h] [-v] [-x] [-t []] [-u] [-e] ` +Usage: `hadoop fs -count [-q] [-h] [-v] [-x] [-t []] [-u] [-e] [-s] ` Count the number of directories, files and bytes under the paths that match the specified file pattern. Get the quota and the usage. The output columns with -count are: DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME @@ -169,6 +156,8 @@ The output columns with -count -e are: DIR\_COUNT, FILE\_COUNT, CONTENT_SIZE, ER The ERASURECODING\_POLICY is name of the policy for the file. If a erasure coding policy is setted on that file, it will return name of the policy. If no erasure coding policy is setted, it will return \"Replicated\" which means it use replication storage strategy. +The -s option shows the snapshot counts for each directory. + Example: * `hadoop fs -count hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2` @@ -179,6 +168,7 @@ Example: * `hadoop fs -count -u -h hdfs://nn1.example.com/file1` * `hadoop fs -count -u -h -v hdfs://nn1.example.com/file1` * `hadoop fs -count -e hdfs://nn1.example.com/file1` +* `hadoop fs -count -s hdfs://nn1.example.com/file1` Exit Code: @@ -535,7 +525,7 @@ Returns 0 on success and -1 on error. put --- -Usage: `hadoop fs -put [-f] [-p] [-l] [-d] [ - | .. ]. ` +Usage: `hadoop fs -put [-f] [-p] [-l] [-d] [-t ] [ - | .. ]. ` Copy single src, or multiple srcs from local file system to the destination file system. 
Also reads input from stdin and writes to destination file system if the source is set to "-" @@ -547,6 +537,8 @@ Options: * `-p` : Preserves access and modification times, ownership and the permissions. (assuming the permissions can be propagated across filesystems) * `-f` : Overwrites the destination if it already exists. +* `-t ` : Number of threads to be used, default is 1. Useful + when uploading a directory containing more than 1 file. * `-l` : Allow DataNode to lazily persist the file to disk, Forces a replication factor of 1. This flag will result in reduced durability. Use with care. * `-d` : Skip creation of temporary file with the suffix `._COPYING_`. diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md index 2d0f23293bfa3..43a3f331f0f30 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md @@ -56,6 +56,7 @@ Each metrics record contains tags such as ProcessName, SessionID and Hostname as | `GcNumWarnThresholdExceeded` | Number of times that the GC warn threshold is exceeded | | `GcNumInfoThresholdExceeded` | Number of times that the GC info threshold is exceeded | | `GcTotalExtraSleepTime` | Total GC extra sleep time in msec | +| `GcTimePercentage` | The percentage (0..100) of time that the JVM spent in GC pauses within the observation window if `dfs.namenode.gc.time.monitor.enable` is set to true. Use `dfs.namenode.gc.time.monitor.sleep.interval.ms` to specify the sleep interval in msec. Use `dfs.namenode.gc.time.monitor.observation.window.ms` to specify the observation window in msec. | rpc context =========== @@ -122,6 +123,17 @@ FairCallQueue metrics will only exist if FairCallQueue is enabled. Each metric e | `FairCallQueueSize_p`*Priority* | Current number of calls in priority queue | | `FairCallQueueOverflowedCalls_p`*Priority* | Total number of overflowed calls in priority queue | +DecayRpcSchedulerDetailed +------------------------- + +DecayRpcSchedulerDetailed metrics only exist when DecayRpcScheduler is used (FairCallQueue enabled). It is an addition +to FairCallQueue metrics. For each level of priority, rpcqueue and rpcprocessing detailed metrics are exposed. 
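As a rough illustration of the GC monitoring switch described in the `GcTimePercentage` row above, the sketch below sets the three named properties programmatically. It is only a sketch: the property names are taken from the row above, while the interval and window values are assumed example numbers, and in practice these settings would normally live in `hdfs-site.xml`.

```java
import org.apache.hadoop.conf.Configuration;

// Minimal sketch: enable the GC time monitor that feeds GcTimePercentage.
// The property names come from the metric description above; the numeric
// values below are illustrative assumptions, not documented defaults.
public class GcTimeMonitorConfigSketch {
  public static Configuration withGcTimeMonitor() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.namenode.gc.time.monitor.enable", true);
    // Sample GC pause time every second (assumed value).
    conf.setLong("dfs.namenode.gc.time.monitor.sleep.interval.ms", 1000L);
    // Report the percentage over a one-minute observation window (assumed value).
    conf.setLong("dfs.namenode.gc.time.monitor.observation.window.ms", 60_000L);
    return conf;
  }
}
```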
+ +| Name | Description | +|:---- | :---- | +| `DecayRPCSchedulerPriority.`*Priority*`.RpcQueueTime` | RpcQueueTime metrics for each priority | +| `DecayRPCSchedulerPriority.`*Priority*`.RpcProcessingTime` | RpcProcessingTime metrics for each priority | + rpcdetailed context =================== @@ -441,6 +453,22 @@ Each metrics record contains tags such as SessionId and Hostname as additional i | `EcReconstructionBytesRead` | Total number of bytes read by erasure coding worker | | `EcReconstructionBytesWritten` | Total number of bytes written by erasure coding worker | | `EcReconstructionRemoteBytesRead` | Total number of bytes remote read by erasure coding worker | +| `CreateRbwOpNumOps` | Total number of create rbw operations | +| `CreateRbwOpAvgTime` | Average time of create rbw operations in milliseconds | +| `RecoverRbwOpNumOps` | Total number of recovery rbw operations | +| `RecoverRbwOpAvgTime` | Average time of recovery rbw operations in milliseconds | +| `ConvertTemporaryToRbwOpNumOps` | Total number of convert temporary to rbw operations | +| `ConvertTemporaryToRbwOpAvgTime` | Average time of convert temporary to rbw operations in milliseconds | +| `CreateTemporaryOpNumOps` | Total number of create temporary operations | +| `CreateTemporaryOpAvgTime` | Average time of create temporary operations in milliseconds | +| `FinalizeBlockOpNumOps` | Total number of finalize block operations | +| `FinalizeBlockOpAvgTime` | Average time of finalize block operations in milliseconds | +| `UnfinalizeBlockOpNumOps` | Total number of un-finalize block operations | +| `UnfinalizeBlockOpAvgTime` | Average time of un-finalize block operations in milliseconds | +| `CheckAndUpdateOpNumOps` | Total number of check and update operations | +| `CheckAndUpdateOpAvgTime` | Average time of check and update operations in milliseconds | +| `UpdateReplicaUnderRecoveryOpNumOps` | Total number of update replica under recovery operations | +| `UpdateReplicaUnderRecoveryOpAvgTime` | Average time of update replica under recovery operations in milliseconds | FsVolume -------- diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md b/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md index 89946d18308a0..9b3b1d78ed126 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md @@ -64,7 +64,7 @@ rack and is unable to do so as there is only a single rack named python Example -------------- ```python -#!/usr/bin/python +#!/usr/bin/python3 # this script makes assumptions about the physical environment. 
# 1) each rack is its own layer 3 network with a /24 subnet, which # could be typical where each rack has its own @@ -94,9 +94,9 @@ for ip in sys.argv: # loop over lis address = '{0}/{1}'.format(ip, netmask) # format address string so it looks like 'ip/netmask' to make netaddr work try: network_address = netaddr.IPNetwork(address).network # calculate and print network address - print "/{0}".format(network_address) + print("/{0}".format(network_address)) except: - print "/rack-unknown" # print catch-all value if unable to calculate network address + print("/rack-unknown") # print catch-all value if unable to calculate network address ``` bash Example diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm index 18fb52dd55c09..45c084bb543be 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm +++ b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm @@ -39,7 +39,7 @@ $H3 Required Software Required software for Linux include: -1. Java™ must be installed. Recommended Java versions are described at [HadoopJavaVersions](http://wiki.apache.org/hadoop/HadoopJavaVersions). +1. Java™ must be installed. Recommended Java versions are described at [HadoopJavaVersions](https://cwiki.apache.org/confluence/display/HADOOP/Hadoop+Java+Versions). 2. ssh must be installed and sshd must be running to use the Hadoop scripts that manage remote Hadoop daemons if the optional start and stop scripts are to be used. Additionally, it is recommmended that pdsh also be installed for better ssh resource management. @@ -130,7 +130,7 @@ If you cannot ssh to localhost without a passphrase, execute the following comma $H3 Execution -The following instructions are to run a MapReduce job locally. If you want to execute a job on YARN, see [YARN on Single Node](#YARN_on_Single_Node). +The following instructions are to run a MapReduce job locally. If you want to execute a job on YARN, see [YARN on Single Node](#YARN_on_a_Single_Node). 1. Format the filesystem: diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md index a2458ee891448..665e328447d5b 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md @@ -43,6 +43,15 @@ The implementations of `FileSystem` shipped with Apache Hadoop All the requirements of a valid FileSystem are considered implicit preconditions and postconditions: all operations on a valid FileSystem MUST result in a new FileSystem that is also valid. +## Feasible features + +### Protected directories + +HDFS has the notion of *Protected Directories*, which are declared in +the option `fs.protected.directories`. Any attempt to delete or rename +such a directory or a parent thereof raises an `AccessControlException`. +Accordingly, any attempt to delete the root directory SHALL, if there is +a protected directory, result in such an exception being raised. ## Predicates and other state access operations @@ -477,11 +486,11 @@ running out of memory as it calculates the partitions. Any FileSystem that does not actually break files into blocks SHOULD return a number for this that results in efficient processing. -A FileSystem MAY make this user-configurable (the S3 and Swift filesystem clients do this). 
+A FileSystem MAY make this user-configurable (the object store connectors usually do this). ### `long getDefaultBlockSize(Path p)` -Get the "default" block size for a path —that is, the block size to be used +Get the "default" block size for a path --that is, the block size to be used when writing objects to a path in the filesystem. #### Preconditions @@ -530,14 +539,21 @@ on the filesystem. ### `boolean mkdirs(Path p, FsPermission permission)` -Create a directory and all its parents +Create a directory and all its parents. #### Preconditions +The path must either be a directory or not exist + if exists(FS, p) and not isDir(FS, p) : raise [ParentNotDirectoryException, FileAlreadyExistsException, IOException] +No ancestor may be a file + + forall d = ancestors(FS, p) : + if exists(FS, d) and not isDir(FS, d) : + raise [ParentNotDirectoryException, FileAlreadyExistsException, IOException] #### Postconditions @@ -577,6 +593,11 @@ Writing to or overwriting a directory must fail. if isDir(FS, p) : raise {FileAlreadyExistsException, FileNotFoundException, IOException} +No ancestor may be a file + + forall d = ancestors(FS, p) : + if exists(FS, d) and not isDir(FS, d) : + raise [ParentNotDirectoryException, FileAlreadyExistsException, IOException] FileSystems may reject the request for other reasons, such as the FS being read-only (HDFS), @@ -584,7 +605,8 @@ the block size being below the minimum permitted (HDFS), the replication count being out of range (HDFS), quotas on namespace or filesystem being exceeded, reserved names, etc. All rejections SHOULD be `IOException` or a subclass thereof -and MAY be a `RuntimeException` or subclass. For instance, HDFS may raise a `InvalidPathException`. +and MAY be a `RuntimeException` or subclass. +For instance, HDFS may raise an `InvalidPathException`. #### Postconditions @@ -709,24 +731,29 @@ exists in the metadata, but no copies of any its blocks can be located; Creates a [`FSDataInputStreamBuilder`](fsdatainputstreambuilder.html) to construct a operation to open the file at `path` for reading. - When `build()` is invoked on the returned `FSDataInputStreamBuilder` instance, the builder parameters are verified and -`openFileWithOptions(Path, Set, Configuration, int)` invoked. +`openFileWithOptions(Path, OpenFileParameters)` invoked. This (protected) operation returns a `CompletableFuture` which, when its `get()` method is called, either returns an input stream of the contents of opened file, or raises an exception. -The base implementation of the `openFileWithOptions(PathHandle, Set, Configuration, int)` +The base implementation of the `openFileWithOptions(PathHandle, OpenFileParameters)` ultimately invokes `open(Path, int)`. Thus the chain `openFile(path).build().get()` has the same preconditions and postconditions as `open(Path p, int bufferSize)` +However, there is one difference which implementations are free to +take advantage of: + +The returned stream MAY implement a lazy open where file non-existence or +access permission failures may not surface until the first `read()` of the +actual data. -The `openFile()` operation may check the state of the filesystem during this -call, but as the state of the filesystem may change betwen this call and +The `openFile()` operation may check the state of the filesystem during its +invocation, but as the state of the filesystem may change between this call and the actual `build()` and `get()` operations, this file-specific preconditions (file exists, file is readable, etc) MUST NOT be checked here.
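To make the `openFile(path).build().get()` chain above concrete, here is a minimal client-side sketch. It assumes an already-initialized `FileSystem` instance `fs` and a `Path` `path`; its only purpose is to show where failures can surface, given that `openFile()` itself must not check file-specific preconditions and the returned stream may be opened lazily.

```java
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class OpenFileChainSketch {
  // Sketch only: fs and path are assumed to be supplied by the caller.
  static int readFirstByte(FileSystem fs, Path path) throws Exception {
    // openFile() performs no file-specific checks...
    CompletableFuture<FSDataInputStream> future = fs.openFile(path).build();
    // ...so a missing or unreadable file may only surface here, at get(),
    // or even later, at the first read(), if the store opens the stream lazily.
    try (FSDataInputStream in = future.get()) {
      return in.read();
    }
  }
}
```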
@@ -757,6 +784,10 @@ It SHOULD be possible to always open a file without specifying any options, so as to present a consistent model to users. However, an implementation MAY opt to require one or more mandatory options to be set. +The returned stream may perform "lazy" evaluation of file access. This is +relevant for object stores where the probes for existence are expensive, and, +even with an asynchronous open, may be considered needless. + ### `FSDataInputStreamBuilder openFile(PathHandle)` Creates a `FSDataInputStreamBuilder` to build an operation to open a file. @@ -765,13 +796,13 @@ to construct a operation to open the file identified by the given `PathHandle` f When `build()` is invoked on the returned `FSDataInputStreamBuilder` instance, the builder parameters are verified and -`openFileWithOptions(PathHandle, Set, Configuration, int)` invoked. +`openFileWithOptions(PathHandle, OpenFileParameters)` invoked. This (protected) operation returns a `CompletableFuture` which, when its `get()` method is called, either returns an input stream of the contents of opened file, or raises an exception. -The base implementation of the `openFileWithOptions(Path,PathHandle, Set, Configuration, int)` method +The base implementation of the `openFileWithOptions(PathHandle, OpenFileParameters)` method returns a future which invokes `open(Path, int)`. Thus the chain `openFile(pathhandle).build().get()` has the same preconditions @@ -1009,12 +1040,6 @@ filesystem is desired. 1. Object Stores: see [Object Stores: root directory deletion](#object-stores-rm-root). -HDFS has the notion of *Protected Directories*, which are declared in -the option `fs.protected.directories`. Any attempt to delete such a directory -or a parent thereof raises an `AccessControlException`. Accordingly, any -attempt to delete the root directory SHALL, if there is a protected directory, -result in such an exception being raised. - This specification does not recommend any specific action. Do note, however, that the POSIX model assumes that there is a permissions model such that normal users do not have the permission to delete that root directory; it is an action diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md index a7c393d9a41c1..eadba174fc1a6 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md @@ -43,6 +43,31 @@ path validation. Set the size of the buffer to be used. +### `FSDataInputStreamBuilder withFileStatus(FileStatus status)` + +A `FileStatus` instance which refers to the file being opened. + +This MAY be used by implementations to short-circuit checks for the file, +so potentially saving on remote calls, especially to object stores. + +Requirements: + +* `status != null` +* `status.getPath()` == the resolved path of the file being opened. + +The path validation MUST take place if the store uses the `FileStatus` when +it opens files, and MAY be performed otherwise. The validation +SHOULD be postponed until the `build()` operation. + +This operation should be considered a hint to the filesystem. + +If a filesystem implementation extends the `FileStatus` returned in its +listings, the implementation MAY use this information when opening the file.
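Purely as an illustration of that hint, and of the "validate the path, then use the custom status or discard it" guidance in the following paragraphs, here is a hypothetical connector-side sketch. `MyStoreFileStatus` is an invented subclass standing in for a store-specific status type such as one carrying an etag; it is not part of the Hadoop API.

```java
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

// Hypothetical handling of the withFileStatus() hint inside a connector.
final class OpenFileHintSketch {

  static String etagFromHint(Path resolvedPath, FileStatus hint) {
    if (hint == null) {
      return null;                                   // no hint supplied
    }
    // Always validate that the hint refers to the file being opened.
    if (!resolvedPath.equals(hint.getPath())) {
      throw new IllegalArgumentException(
          "FileStatus path mismatch: " + hint.getPath());
    }
    if (hint instanceof MyStoreFileStatus) {
      // Use the extra information carried by the custom subclass.
      return ((MyStoreFileStatus) hint).getEtag();
    }
    // Any other FileStatus subclass (e.g. LocatedFileStatus) is simply ignored.
    return null;
  }

  /** Assumed store-specific status type; not a real Hadoop class. */
  static class MyStoreFileStatus extends FileStatus {
    private final String etag;

    MyStoreFileStatus(String etag) {
      this.etag = etag;
    }

    String getEtag() {
      return etag;
    }
  }
}
```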
+ +This is relevant for those stores which return version/etag information, +including the S3A and ABFS connectors - they MAY use this to guarantee that +the file they opened is exactly the one returned in the listing. + ### Set optional or mandatory parameters FSDataInputStreamBuilder opt(String key, ...) @@ -56,6 +81,7 @@ of `FileSystem`. out = fs.openFile(path) .opt("fs.s3a.experimental.input.fadvise", "random") .must("fs.s3a.readahead.range", 256 * 1024) + .withFileStatus(statusFromListing) .build() .get(); ``` @@ -76,6 +102,21 @@ builder methods (i.e., `bufferSize()`) and `opt()`/`must()` is as follows: > The last option specified defines the value and its optional/mandatory state. +If the `FileStatus` option passed in `withFileStatus()` is used, implementations +MUST accept all subclasses of `FileStatus`, including `LocatedFileStatus`, +rather than just any FS-specific subclass implemented by the implementation +(e.g. `S3AFileStatus`). They MAY simply ignore those which are not the +custom subclasses. + +This is critical to ensure safe use of the feature: directory listing/ +status serialization/deserialization can result in the `withFileStatus()` +argument not being the custom subclass returned by the Filesystem instance's +own `getFileStatus()`, `listFiles()`, `listLocatedStatus()` calls, etc. + +In such a situation the implementations must: + +1. Validate the path (always). +1. Use the status/convert to the custom type, *or* simply discard it. ## Builder interface diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java index 3a4bccede069f..1ce23a0eb81f2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java @@ -135,6 +135,30 @@ public void initializeMemberVariables() { xmlPropsToSkipCompare.add("fs.azure.saskey.usecontainersaskeyforallaccess"); xmlPropsToSkipCompare.add("fs.azure.user.agent.prefix"); + // FairCallQueue configs that include dynamic ports in their keys + xmlPropsToSkipCompare.add("ipc.[port_number].backoff.enable"); + xmlPropsToSkipCompare.add("ipc.[port_number].callqueue.impl"); + xmlPropsToSkipCompare.add("ipc.[port_number].scheduler.impl"); + xmlPropsToSkipCompare.add("ipc.[port_number].scheduler.priority.levels"); + xmlPropsToSkipCompare.add( + "ipc.[port_number].faircallqueue.multiplexer.weights"); + xmlPropsToSkipCompare.add("ipc.[port_number].identity-provider.impl"); + xmlPropsToSkipCompare.add("ipc.[port_number].cost-provider.impl"); + xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.period-ms"); + xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.decay-factor"); + xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.thresholds"); + xmlPropsToSkipCompare.add( + "ipc.[port_number].decay-scheduler.backoff.responsetime.enable"); + xmlPropsToSkipCompare.add( + "ipc.[port_number].decay-scheduler.backoff.responsetime.thresholds"); + xmlPropsToSkipCompare.add( + "ipc.[port_number].decay-scheduler.metrics.top.user.count"); + xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.lockshared"); + xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.lockexclusive"); + xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.handler"); + 
xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.lockfree"); + xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.response"); + // Deprecated properties. These should eventually be removed from the // class. configurationPropsToSkipCompare @@ -202,6 +226,6 @@ public void initializeMemberVariables() { // - org.apache.hadoop.io.SequenceFile xmlPropsToSkipCompare.add("io.seqfile.local.dir"); - + xmlPropsToSkipCompare.add("hadoop.http.sni.host.check.enabled"); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index a6adb9f20a3ef..81c53959478b4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -2553,4 +2553,41 @@ public void testResourceRace() { // Thread 1 config.get("secondParse"); } + + @Test + public void testCDATA() throws IOException { + String xml = new String( + "" + + "" + + "cdata" + + "cdata]]>" + + "\n" + + "" + + "cdata-multiple" + + "cdata1]]> and cdata2]]>" + + "\n" + + "" + + "cdata-multiline" + + "cdata\nmultiline<>]]>" + + "\n" + + "" + + "cdata-whitespace" + + " prefix cdata]]>\nsuffix " + + "\n" + + ""); + Configuration conf = checkCDATA(xml.getBytes()); + ByteArrayOutputStream os = new ByteArrayOutputStream(); + conf.writeXml(os); + checkCDATA(os.toByteArray()); + } + + private static Configuration checkCDATA(byte[] bytes) { + Configuration conf = new Configuration(false); + conf.addResource(new ByteArrayInputStream(bytes)); + assertEquals(">cdata", conf.get("cdata")); + assertEquals(">cdata1 and >cdata2", conf.get("cdata-multiple")); + assertEquals(">cdata\nmultiline<>", conf.get("cdata-multiline")); + assertEquals(" prefix >cdata\nsuffix ", conf.get("cdata-whitespace")); + return conf; + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java index a4ccee3f7f58e..8065b3f61f52c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java @@ -862,7 +862,8 @@ private void assertListFilesFinds(Path dir, Path subdir) throws IOException { found); } - private void assertListStatusFinds(Path dir, Path subdir) throws IOException { + protected void assertListStatusFinds(Path dir, Path subdir) + throws IOException { FileStatus[] stats = fs.listStatus(dir); boolean found = false; StringBuilder builder = new StringBuilder(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java index 7cc7ae4094974..98f9f2021f8b4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java @@ -253,4 +253,40 @@ public void testToStringHumanNoShowQuota() { String expected = " 32.6 K 211.9 M 8.0 E "; assertEquals(expected, contentSummary.toString(false, true)); } + + // check the toSnapshot method with human readable. 
+ @Test + public void testToSnapshotHumanReadable() { + long snapshotLength = Long.MAX_VALUE; + long snapshotFileCount = 222222222; + long snapshotDirectoryCount = 33333; + long snapshotSpaceConsumed = 222256578; + + ContentSummary contentSummary = new ContentSummary.Builder() + .snapshotLength(snapshotLength).snapshotFileCount(snapshotFileCount) + .snapshotDirectoryCount(snapshotDirectoryCount) + .snapshotSpaceConsumed(snapshotSpaceConsumed).build(); + String expected = + " 8.0 E 211.9 M 32.6 K " + + " 212.0 M "; + assertEquals(expected, contentSummary.toSnapshot(true)); + } + + // check the toSnapshot method with human readable disabled. + @Test + public void testToSnapshotNotHumanReadable() { + long snapshotLength = 1111; + long snapshotFileCount = 2222; + long snapshotDirectoryCount = 3333; + long snapshotSpaceConsumed = 4444; + + ContentSummary contentSummary = new ContentSummary.Builder() + .snapshotLength(snapshotLength).snapshotFileCount(snapshotFileCount) + .snapshotDirectoryCount(snapshotDirectoryCount) + .snapshotSpaceConsumed(snapshotSpaceConsumed).build(); + String expected = + " 1111 2222 3333 " + + " 4444 "; + assertEquals(expected, contentSummary.toSnapshot(false)); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java index 96fac57518bfd..2919de20bffd9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java @@ -61,8 +61,8 @@ public void testFileContextResolveAfs() throws IOException { fc.createSymlink(localPath, linkPath, true); Set afsList = fc.resolveAbstractFileSystems(linkPath); Assert.assertEquals(1, afsList.size()); - localFs.deleteOnExit(localPath); - localFs.deleteOnExit(linkPath); + localFs.delete(linkPath, true); + localFs.delete(localPath, true); localFs.close(); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java index 4d627a5e8e256..10ad8a14487ef 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java @@ -18,14 +18,24 @@ package org.apache.hadoop.fs; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.util.Progressable; +import java.io.FileNotFoundException; import java.io.IOException; +import java.net.URI; import java.net.URL; import java.util.ServiceConfigurationError; import org.junit.Test; + +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.*; +/** + * Tests related to filesystem creation and lifecycle. 
+ */ public class TestFileSystemInitialization { /** @@ -55,4 +65,119 @@ public void testMissingLibraries() { } catch (Exception | ServiceConfigurationError expected) { } } + + @Test + public void testNewInstanceFailure() throws Throwable { + intercept(IOException.class, FailingFileSystem.INITIALIZE, () -> + FileSystem.newInstance(new URI("failing://localhost"), FailingFileSystem + .failingConf())); + assertThat(FailingFileSystem.initCount).describedAs("init count") + .isEqualTo(1); + assertThat(FailingFileSystem.closeCount).describedAs("close count") + .isEqualTo(1); + } + + /** + * An FS which will fail on both init and close, and update + * counters of invocations as it does so. + */ + public static class FailingFileSystem extends FileSystem { + + public static final String INITIALIZE = "initialize()"; + + public static final String CLOSE = "close()"; + + private static int initCount; + + private static int closeCount; + + private static Configuration failingConf() { + final Configuration conf = new Configuration(false); + conf.setClass("fs.failing.impl", FailingFileSystem.class, + FileSystem.class); + return conf; + } + + @Override + public void initialize(final URI name, final Configuration conf) + throws IOException { + super.initialize(name, conf); + initCount++; + throw new IOException(INITIALIZE); + } + + @Override + public void close() throws IOException { + closeCount++; + throw new IOException(CLOSE); + } + + @Override + public URI getUri() { + return null; + } + + @Override + public FSDataInputStream open(final Path f, final int bufferSize) + throws IOException { + return null; + } + + @Override + public FSDataOutputStream create(final Path f, + final FsPermission permission, + final boolean overwrite, + final int bufferSize, + final short replication, + final long blockSize, + final Progressable progress) throws IOException { + return null; + } + + @Override + public FSDataOutputStream append(final Path f, + final int bufferSize, + final Progressable progress) throws IOException { + return null; + } + + @Override + public boolean rename(final Path src, final Path dst) throws IOException { + return false; + } + + @Override + public boolean delete(final Path f, final boolean recursive) + throws IOException { + return false; + } + + @Override + public FileStatus[] listStatus(final Path f) + throws FileNotFoundException, IOException { + return new FileStatus[0]; + } + + @Override + public void setWorkingDirectory(final Path new_dir) { + + } + + @Override + public Path getWorkingDirectory() { + return null; + } + + @Override + public boolean mkdirs(final Path f, final FsPermission permission) + throws IOException { + return false; + } + + @Override + public FileStatus getFileStatus(final Path f) throws IOException { + return null; + } + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java index 5d22a6a2a4896..1ca1f241e5e9d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; +import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; @@ -44,6 +45,7 
@@ import java.nio.file.Files; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.jar.Attributes; @@ -64,22 +66,38 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Ignore; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TemporaryFolder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class TestFileUtil { private static final Logger LOG = LoggerFactory.getLogger(TestFileUtil.class); - private static final File TEST_DIR = GenericTestUtils.getTestDir("fu"); + @Rule + public TemporaryFolder testFolder = new TemporaryFolder(); + private static final String FILE = "x"; private static final String LINK = "y"; private static final String DIR = "dir"; - private final File del = new File(TEST_DIR, "del"); - private final File tmp = new File(TEST_DIR, "tmp"); - private final File dir1 = new File(del, DIR + "1"); - private final File dir2 = new File(del, DIR + "2"); - private final File partitioned = new File(TEST_DIR, "partitioned"); + + private static final String FILE_1_NAME = "file1"; + + private File del; + private File tmp; + private File dir1; + private File dir2; + private File partitioned; + + private File xSubDir; + private File xSubSubDir; + private File ySubDir; + + private File file2; + private File file22; + private File file3; + private File zlink; private InetAddress inet1; private InetAddress inet2; @@ -116,21 +134,34 @@ public class TestFileUtil { * file: part-r-00000, contents: "foo" * file: part-r-00001, contents: "bar" */ - @Ignore - private void setupDirs() throws IOException { - Assert.assertFalse(del.exists()); - Assert.assertFalse(tmp.exists()); - Assert.assertFalse(partitioned.exists()); - del.mkdirs(); - tmp.mkdirs(); - partitioned.mkdirs(); + @Before + public void setup() throws IOException { + del = testFolder.newFolder("del"); + tmp = testFolder.newFolder("tmp"); + partitioned = testFolder.newFolder("partitioned"); + + zlink = new File(del, "zlink"); + + xSubDir = new File(del, "xSubDir"); + xSubSubDir = new File(xSubDir, "xSubSubDir"); + ySubDir = new File(del, "ySubDir"); + + + file2 = new File(xSubDir, "file2"); + file22 = new File(xSubSubDir, "file22"); + file3 = new File(ySubDir, "file3"); + + dir1 = new File(del, DIR + "1"); + dir2 = new File(del, DIR + "2"); + + FileUtils.forceMkdir(dir1); + FileUtils.forceMkdir(dir2); + new File(del, FILE).createNewFile(); File tmpFile = new File(tmp, FILE); tmpFile.createNewFile(); - // create directories - dir1.mkdirs(); - dir2.mkdirs(); + // create files new File(dir1, FILE).createNewFile(); new File(dir2, FILE).createNewFile(); @@ -151,6 +182,11 @@ private void setupDirs() throws IOException { FileUtil.symLink(del.toString(), dir1.toString() + "/cycle"); } + @After + public void tearDown() throws IOException { + testFolder.delete(); + } + /** * Creates a new file in the specified directory, with the specified name and * the specified file contents. 
This method will add a newline terminator to @@ -175,7 +211,6 @@ private File createFile(File directory, String name, String contents) @Test (timeout = 30000) public void testListFiles() throws IOException { - setupDirs(); //Test existing files case File[] files = FileUtil.listFiles(partitioned); Assert.assertEquals(2, files.length); @@ -202,7 +237,6 @@ public void testListFiles() throws IOException { @Test (timeout = 30000) public void testListAPI() throws IOException { - setupDirs(); //Test existing files case String[] files = FileUtil.list(partitioned); Assert.assertEquals("Unexpected number of pre-existing files", 2, files.length); @@ -227,30 +261,8 @@ public void testListAPI() throws IOException { } } - @Before - public void before() throws IOException { - cleanupImpl(); - } - - @After - public void tearDown() throws IOException { - cleanupImpl(); - } - - private void cleanupImpl() throws IOException { - FileUtil.fullyDelete(del, true); - Assert.assertTrue(!del.exists()); - - FileUtil.fullyDelete(tmp, true); - Assert.assertTrue(!tmp.exists()); - - FileUtil.fullyDelete(partitioned, true); - Assert.assertTrue(!partitioned.exists()); - } - @Test (timeout = 30000) public void testFullyDelete() throws IOException { - setupDirs(); boolean ret = FileUtil.fullyDelete(del); Assert.assertTrue(ret); Assert.assertFalse(del.exists()); @@ -265,8 +277,6 @@ public void testFullyDelete() throws IOException { */ @Test (timeout = 30000) public void testFullyDeleteSymlinks() throws IOException { - setupDirs(); - File link = new File(del, LINK); Assert.assertEquals(5, del.list().length); // Since tmpDir is symlink to tmp, fullyDelete(tmpDir) should not @@ -295,7 +305,6 @@ public void testFullyDeleteSymlinks() throws IOException { */ @Test (timeout = 30000) public void testFullyDeleteDanglingSymlinks() throws IOException { - setupDirs(); // delete the directory tmp to make tmpDir a dangling link to dir tmp and // to make y as a dangling link to file tmp/x boolean ret = FileUtil.fullyDelete(tmp); @@ -322,7 +331,6 @@ public void testFullyDeleteDanglingSymlinks() throws IOException { @Test (timeout = 30000) public void testFullyDeleteContents() throws IOException { - setupDirs(); boolean ret = FileUtil.fullyDeleteContents(del); Assert.assertTrue(ret); Assert.assertTrue(del.exists()); @@ -336,15 +344,6 @@ private void validateTmpDir() { Assert.assertTrue(new File(tmp, FILE).exists()); } - private final File xSubDir = new File(del, "xSubDir"); - private final File xSubSubDir = new File(xSubDir, "xSubSubDir"); - private final File ySubDir = new File(del, "ySubDir"); - private static final String file1Name = "file1"; - private final File file2 = new File(xSubDir, "file2"); - private final File file22 = new File(xSubSubDir, "file22"); - private final File file3 = new File(ySubDir, "file3"); - private final File zlink = new File(del, "zlink"); - /** * Creates a directory which can not be deleted completely. * @@ -366,36 +365,30 @@ private void validateTmpDir() { * @throws IOException */ private void setupDirsAndNonWritablePermissions() throws IOException { - Assert.assertFalse("The directory del should not have existed!", - del.exists()); - del.mkdirs(); - new MyFile(del, file1Name).createNewFile(); + new MyFile(del, FILE_1_NAME).createNewFile(); // "file1" is non-deletable by default, see MyFile.delete(). 
xSubDir.mkdirs(); file2.createNewFile(); - + xSubSubDir.mkdirs(); file22.createNewFile(); - + revokePermissions(file22); revokePermissions(xSubSubDir); - + revokePermissions(file2); revokePermissions(xSubDir); - + ySubDir.mkdirs(); file3.createNewFile(); - Assert.assertFalse("The directory tmp should not have existed!", - tmp.exists()); - tmp.mkdirs(); File tmpFile = new File(tmp, FILE); tmpFile.createNewFile(); FileUtil.symLink(tmpFile.toString(), zlink.toString()); } - + private static void grantPermissions(final File f) { FileUtil.setReadable(f, true); FileUtil.setWritable(f, true); @@ -417,7 +410,7 @@ private void validateAndSetWritablePermissions( Assert.assertFalse("The return value should have been false.", ret); Assert.assertTrue("The file file1 should not have been deleted.", - new File(del, file1Name).exists()); + new File(del, FILE_1_NAME).exists()); Assert.assertEquals( "The directory xSubDir *should* not have been deleted.", @@ -445,7 +438,7 @@ public void testFailFullyDelete() throws IOException { boolean ret = FileUtil.fullyDelete(new MyFile(del)); validateAndSetWritablePermissions(true, ret); } - + @Test (timeout = 30000) public void testFailFullyDeleteGrantPermissions() throws IOException { setupDirsAndNonWritablePermissions(); @@ -482,7 +475,7 @@ public MyFile(File parent, String child) { public boolean delete() { LOG.info("Trying to delete myFile " + getAbsolutePath()); boolean bool = false; - if (getName().equals(file1Name)) { + if (getName().equals(FILE_1_NAME)) { bool = false; } else { bool = super.delete(); @@ -532,7 +525,7 @@ public void testFailFullyDeleteContentsGrantPermissions() throws IOException { // this time the directories with revoked permissions *should* be deleted: validateAndSetWritablePermissions(false, ret); } - + /** * Test that getDU is able to handle cycles caused due to symbolic links * and that directory sizes are not added to the final calculated size @@ -540,9 +533,7 @@ public void testFailFullyDeleteContentsGrantPermissions() throws IOException { */ @Test (timeout = 30000) public void testGetDU() throws Exception { - setupDirs(); - - long du = FileUtil.getDU(TEST_DIR); + long du = FileUtil.getDU(testFolder.getRoot()); // Only two files (in partitioned). Each has 3 characters + system-specific // line separator. 
final long expected = 2 * (3 + System.getProperty("line.separator").length()); @@ -591,8 +582,6 @@ public void testGetDU() throws Exception { @Test (timeout = 30000) public void testUnTar() throws IOException { - setupDirs(); - // make a simple tar: final File simpleTar = new File(del, FILE); OutputStream os = new FileOutputStream(simpleTar); @@ -629,7 +618,6 @@ public void testUnTar() throws IOException { @Test (timeout = 30000) public void testReplaceFile() throws IOException { - setupDirs(); final File srcFile = new File(tmp, "src"); // src exists, and target does not exist: @@ -671,7 +659,6 @@ public void testReplaceFile() throws IOException { @Test (timeout = 30000) public void testCreateLocalTempFile() throws IOException { - setupDirs(); final File baseFile = new File(tmp, "base"); File tmp1 = FileUtil.createLocalTempFile(baseFile, "foo", false); File tmp2 = FileUtil.createLocalTempFile(baseFile, "foo", true); @@ -687,8 +674,7 @@ public void testCreateLocalTempFile() throws IOException { @Test (timeout = 30000) public void testUnZip() throws IOException { - setupDirs(); - // make a simple zip + // make sa simple zip final File simpleZip = new File(del, FILE); OutputStream os = new FileOutputStream(simpleZip); ZipOutputStream tos = new ZipOutputStream(os); @@ -724,7 +710,6 @@ public void testUnZip() throws IOException { @Test (timeout = 30000) public void testUnZip2() throws IOException { - setupDirs(); // make a simple zip final File simpleZip = new File(del, FILE); OutputStream os = new FileOutputStream(simpleZip); @@ -755,8 +740,6 @@ public void testUnZip2() throws IOException { * Test method copy(FileSystem srcFS, Path src, File dst, boolean deleteSource, Configuration conf) */ public void testCopy5() throws IOException { - setupDirs(); - URI uri = tmp.toURI(); Configuration conf = new Configuration(); FileSystem fs = FileSystem.newInstance(uri, conf); @@ -846,9 +829,6 @@ public void testStat2Paths2() { @Test (timeout = 30000) public void testSymlink() throws Exception { - Assert.assertFalse(del.exists()); - del.mkdirs(); - byte[] data = "testSymLink".getBytes(); File file = new File(del, FILE); @@ -881,9 +861,6 @@ public void testSymlink() throws Exception { */ @Test (timeout = 30000) public void testSymlinkRenameTo() throws Exception { - Assert.assertFalse(del.exists()); - del.mkdirs(); - File file = new File(del, FILE); file.createNewFile(); File link = new File(del, "_link"); @@ -913,9 +890,6 @@ public void testSymlinkRenameTo() throws Exception { */ @Test (timeout = 30000) public void testSymlinkDelete() throws Exception { - Assert.assertFalse(del.exists()); - del.mkdirs(); - File file = new File(del, FILE); file.createNewFile(); File link = new File(del, "_link"); @@ -937,9 +911,6 @@ public void testSymlinkDelete() throws Exception { */ @Test (timeout = 30000) public void testSymlinkLength() throws Exception { - Assert.assertFalse(del.exists()); - del.mkdirs(); - byte[] data = "testSymLinkData".getBytes(); File file = new File(del, FILE); @@ -976,9 +947,6 @@ public void testSymlinkLength() throws Exception { */ @Test public void testSymlinkWithNullInput() throws IOException { - Assert.assertFalse(del.exists()); - del.mkdirs(); - File file = new File(del, FILE); File link = new File(del, "_link"); @@ -996,9 +964,6 @@ public void testSymlinkWithNullInput() throws IOException { // The operation should fail and returns 1 result = FileUtil.symLink(null, link.getAbsolutePath()); Assert.assertEquals(1, result); - - file.delete(); - link.delete(); } /** @@ -1009,9 +974,6 @@ public 
void testSymlinkWithNullInput() throws IOException { */ @Test public void testSymlinkFileAlreadyExists() throws IOException { - Assert.assertFalse(del.exists()); - del.mkdirs(); - File file = new File(del, FILE); File link = new File(del, "_link"); @@ -1027,9 +989,6 @@ public void testSymlinkFileAlreadyExists() throws IOException { result1 = FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath()); Assert.assertEquals(1, result1); - - file.delete(); - link.delete(); } /** @@ -1041,19 +1000,16 @@ public void testSymlinkFileAlreadyExists() throws IOException { */ @Test public void testSymlinkSameFile() throws IOException { - Assert.assertFalse(del.exists()); - del.mkdirs(); - File file = new File(del, FILE); + file.delete(); + // Create a symbolic link // The operation should succeed int result = FileUtil.symLink(file.getAbsolutePath(), file.getAbsolutePath()); Assert.assertEquals(0, result); - - file.delete(); } /** @@ -1065,8 +1021,6 @@ public void testSymlinkSameFile() throws IOException { */ @Test public void testSymlink2DifferentFile() throws IOException { - Assert.assertFalse(del.exists()); - del.mkdirs(); File file = new File(del, FILE); File fileSecond = new File(del, FILE + "_1"); File link = new File(del, "_link"); @@ -1083,10 +1037,6 @@ public void testSymlink2DifferentFile() throws IOException { FileUtil.symLink(fileSecond.getAbsolutePath(), link.getAbsolutePath()); Assert.assertEquals(1, result); - - file.delete(); - fileSecond.delete(); - link.delete(); } /** @@ -1098,8 +1048,6 @@ public void testSymlink2DifferentFile() throws IOException { */ @Test public void testSymlink2DifferentLinks() throws IOException { - Assert.assertFalse(del.exists()); - del.mkdirs(); File file = new File(del, FILE); File link = new File(del, "_link"); File linkSecond = new File(del, "_link_1"); @@ -1116,10 +1064,6 @@ public void testSymlink2DifferentLinks() throws IOException { FileUtil.symLink(file.getAbsolutePath(), linkSecond.getAbsolutePath()); Assert.assertEquals(0, result); - - file.delete(); - link.delete(); - linkSecond.delete(); } private void doUntarAndVerify(File tarFile, File untarDir) @@ -1164,10 +1108,6 @@ public void testUntar() throws IOException { @Test (timeout = 30000) public void testCreateJarWithClassPath() throws Exception { - // setup test directory for files - Assert.assertFalse(tmp.exists()); - Assert.assertTrue(tmp.mkdirs()); - // create files expected to match a wildcard List wildcardMatches = Arrays.asList(new File(tmp, "wildcard1.jar"), new File(tmp, "wildcard2.jar"), new File(tmp, "wildcard3.JAR"), @@ -1256,9 +1196,6 @@ public void testGetJarsInDirectory() throws Exception { assertTrue("no jars should be returned for a bogus path", jars.isEmpty()); - // setup test directory for files - assertFalse(tmp.exists()); - assertTrue(tmp.mkdirs()); // create jar files to be returned File jar1 = new File(tmp, "wildcard1.jar"); @@ -1364,7 +1301,6 @@ public void testCompareFsDirectories() throws Exception { @Test(timeout = 8000) public void testCreateSymbolicLinkUsingJava() throws IOException { - setupDirs(); final File simpleTar = new File(del, FILE); OutputStream os = new FileOutputStream(simpleTar); TarArchiveOutputStream tos = new TarArchiveOutputStream(os); @@ -1458,9 +1394,6 @@ public void testReadSymlinkWithNullInput() { */ @Test public void testReadSymlink() throws IOException { - Assert.assertFalse(del.exists()); - del.mkdirs(); - File file = new File(del, FILE); File link = new File(del, "_link"); @@ -1469,9 +1402,6 @@ public void testReadSymlink() throws 
IOException { String result = FileUtil.readLink(link); Assert.assertEquals(file.getAbsolutePath(), result); - - file.delete(); - link.delete(); } /** @@ -1482,9 +1412,6 @@ public void testReadSymlink() throws IOException { */ @Test public void testReadSymlinkWithAFileAsInput() throws IOException { - Assert.assertFalse(del.exists()); - del.mkdirs(); - File file = new File(del, FILE); String result = FileUtil.readLink(file); @@ -1493,6 +1420,166 @@ public void testReadSymlinkWithAFileAsInput() throws IOException { file.delete(); } + /** + * Test that bytes are written out correctly to the local file system. + */ + @Test + public void testWriteBytesFileSystem() throws IOException { + URI uri = tmp.toURI(); + Configuration conf = new Configuration(); + FileSystem fs = FileSystem.get(uri, conf); + Path testPath = new Path(new Path(uri), "writebytes.out"); + + byte[] write = new byte[] {0x00, 0x01, 0x02, 0x03}; + + FileUtil.write(fs, testPath, write); + + byte[] read = FileUtils.readFileToByteArray(new File(testPath.toUri())); + + assertArrayEquals(write, read); + } + + /** + * Test that a Collection of Strings are written out correctly to the local + * file system. + */ + @Test + public void testWriteStringsFileSystem() throws IOException { + URI uri = tmp.toURI(); + Configuration conf = new Configuration(); + FileSystem fs = FileSystem.get(uri, conf); + Path testPath = new Path(new Path(uri), "writestrings.out"); + + Collection write = Arrays.asList("over", "the", "lazy", "dog"); + + FileUtil.write(fs, testPath, write, StandardCharsets.UTF_8); + + List read = + FileUtils.readLines(new File(testPath.toUri()), StandardCharsets.UTF_8); + + assertEquals(write, read); + } + + /** + * Test that a String is written out correctly to the local file system. + */ + @Test + public void testWriteStringFileSystem() throws IOException { + URI uri = tmp.toURI(); + Configuration conf = new Configuration(); + FileSystem fs = FileSystem.get(uri, conf); + Path testPath = new Path(new Path(uri), "writestring.out"); + + String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C"; + + FileUtil.write(fs, testPath, write, StandardCharsets.UTF_8); + + String read = FileUtils.readFileToString(new File(testPath.toUri()), + StandardCharsets.UTF_8); + + assertEquals(write, read); + } + + /** + * Test that a String is written out correctly to the local file system + * without specifying a character set. + */ + @Test + public void testWriteStringNoCharSetFileSystem() throws IOException { + URI uri = tmp.toURI(); + Configuration conf = new Configuration(); + FileSystem fs = FileSystem.get(uri, conf); + Path testPath = new Path(new Path(uri), "writestring.out"); + + String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C"; + FileUtil.write(fs, testPath, write); + + String read = FileUtils.readFileToString(new File(testPath.toUri()), + StandardCharsets.UTF_8); + + assertEquals(write, read); + } + + /** + * Test that bytes are written out correctly to the local file system. 
+ */ + @Test + public void testWriteBytesFileContext() throws IOException { + URI uri = tmp.toURI(); + Configuration conf = new Configuration(); + FileContext fc = FileContext.getFileContext(uri, conf); + Path testPath = new Path(new Path(uri), "writebytes.out"); + + byte[] write = new byte[] {0x00, 0x01, 0x02, 0x03}; + + FileUtil.write(fc, testPath, write); + + byte[] read = FileUtils.readFileToByteArray(new File(testPath.toUri())); + + assertArrayEquals(write, read); + } + + /** + * Test that a Collection of Strings are written out correctly to the local + * file system. + */ + @Test + public void testWriteStringsFileContext() throws IOException { + URI uri = tmp.toURI(); + Configuration conf = new Configuration(); + FileContext fc = FileContext.getFileContext(uri, conf); + Path testPath = new Path(new Path(uri), "writestrings.out"); + + Collection write = Arrays.asList("over", "the", "lazy", "dog"); + + FileUtil.write(fc, testPath, write, StandardCharsets.UTF_8); + + List read = + FileUtils.readLines(new File(testPath.toUri()), StandardCharsets.UTF_8); + + assertEquals(write, read); + } + + /** + * Test that a String is written out correctly to the local file system. + */ + @Test + public void testWriteStringFileContext() throws IOException { + URI uri = tmp.toURI(); + Configuration conf = new Configuration(); + FileContext fc = FileContext.getFileContext(uri, conf); + Path testPath = new Path(new Path(uri), "writestring.out"); + + String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C"; + + FileUtil.write(fc, testPath, write, StandardCharsets.UTF_8); + + String read = FileUtils.readFileToString(new File(testPath.toUri()), + StandardCharsets.UTF_8); + + assertEquals(write, read); + } + + /** + * Test that a String is written out correctly to the local file system + * without specifying a character set. + */ + @Test + public void testWriteStringNoCharSetFileContext() throws IOException { + URI uri = tmp.toURI(); + Configuration conf = new Configuration(); + FileContext fc = FileContext.getFileContext(uri, conf); + Path testPath = new Path(new Path(uri), "writestring.out"); + + String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C"; + FileUtil.write(fc, testPath, write); + + String read = FileUtils.readFileToString(new File(testPath.toUri()), + StandardCharsets.UTF_8); + + assertEquals(write, read); + } + /** * The size of FileSystem cache. 
*/ diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java index 3b923e05bd3a5..2097633839112 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -242,15 +243,11 @@ FutureDataInputStreamBuilder openFile(PathHandle pathHandle) CompletableFuture openFileWithOptions( PathHandle pathHandle, - Set mandatoryKeys, - Configuration options, - int bufferSize) throws IOException; + OpenFileParameters parameters) throws IOException; CompletableFuture openFileWithOptions( Path path, - Set mandatoryKeys, - Configuration options, - int bufferSize) throws IOException; + OpenFileParameters parameters) throws IOException; } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java index 07c99e0b6a528..79222ce67d6cf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java @@ -22,11 +22,11 @@ import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.junit.Test; -import org.junit.internal.AssumptionViolatedException; +import org.junit.AssumptionViolatedException; -import java.io.FileNotFoundException; import java.io.IOException; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; @@ -40,7 +40,7 @@ * Test creating files, overwrite options etc. */ public abstract class AbstractContractCreateTest extends - AbstractFSContractTestBase { + AbstractFSContractTestBase { /** * How long to wait for a path to become visible. @@ -113,7 +113,6 @@ private void testOverwriteExistingFile(boolean useBuilder) throws Throwable { * This test catches some eventual consistency problems that blobstores exhibit, * as we are implicitly verifying that updates are consistent. 
This * is why different file lengths and datasets are used - * @throws Throwable */ @Test public void testOverwriteExistingFile() throws Throwable { @@ -137,10 +136,6 @@ private void testOverwriteEmptyDirectory(boolean useBuilder) } catch (FileAlreadyExistsException expected) { //expected handleExpectedException(expected); - } catch (FileNotFoundException e) { - handleRelaxedException("overwriting a dir with a file ", - "FileAlreadyExistsException", - e); } catch (IOException e) { handleRelaxedException("overwriting a dir with a file ", "FileAlreadyExistsException", @@ -189,10 +184,6 @@ private void testOverwriteNonEmptyDirectory(boolean useBuilder) } catch (FileAlreadyExistsException expected) { //expected handleExpectedException(expected); - } catch (FileNotFoundException e) { - handleRelaxedException("overwriting a dir with a file ", - "FileAlreadyExistsException", - e); } catch (IOException e) { handleRelaxedException("overwriting a dir with a file ", "FileAlreadyExistsException", @@ -332,4 +323,117 @@ public void testCreateMakesParentDirs() throws Throwable { assertTrue("Grandparent directory does not appear to be a directory", fs.getFileStatus(grandparent).isDirectory()); } + + @Test + public void testCreateFileUnderFile() throws Throwable { + describe("Verify that it is forbidden to create file/file"); + if (isSupported(CREATE_FILE_UNDER_FILE_ALLOWED)) { + // object store or some file systems: downgrade to a skip so that the + // failure is visible in test results + skip("This filesystem supports creating files under files"); + } + Path grandparent = methodPath(); + Path parent = new Path(grandparent, "parent"); + expectCreateUnderFileFails( + "creating a file under a file", + grandparent, + parent); + } + + @Test + public void testCreateUnderFileSubdir() throws Throwable { + describe("Verify that it is forbidden to create file/dir/file"); + if (isSupported(CREATE_FILE_UNDER_FILE_ALLOWED)) { + // object store or some file systems: downgrade to a skip so that the + // failure is visible in test results + skip("This filesystem supports creating files under files"); + } + Path grandparent = methodPath(); + Path parent = new Path(grandparent, "parent"); + Path child = new Path(parent, "child"); + expectCreateUnderFileFails( + "creating a file under a subdirectory of a file", + grandparent, + child); + } + + + @Test + public void testMkdirUnderFile() throws Throwable { + describe("Verify that it is forbidden to create file/dir"); + Path grandparent = methodPath(); + Path parent = new Path(grandparent, "parent"); + expectMkdirsUnderFileFails("mkdirs() under a file", + grandparent, parent); + } + + @Test + public void testMkdirUnderFileSubdir() throws Throwable { + describe("Verify that it is forbidden to create file/dir/dir"); + Path grandparent = methodPath(); + Path parent = new Path(grandparent, "parent"); + Path child = new Path(parent, "child"); + expectMkdirsUnderFileFails("mkdirs() file/dir", + grandparent, child); + + try { + // create the child + mkdirs(child); + } catch (FileAlreadyExistsException | ParentNotDirectoryException ex) { + // either of these may be raised. + handleExpectedException(ex); + } catch (IOException e) { + handleRelaxedException("creating a file under a subdirectory of a file ", + "FileAlreadyExistsException", + e); + } + } + + /** + * Expect that touch() will fail because the parent is a file. 
+ * @param action action for message + * @param file filename to create + * @param descendant path under file + * @throws Exception failure + */ + protected void expectCreateUnderFileFails(String action, + Path file, Path descendant) + throws Exception { + createFile(file); + try { + // create the child + createFile(descendant); + } catch (FileAlreadyExistsException | ParentNotDirectoryException ex) { + //expected + handleExpectedException(ex); + } catch (IOException e) { + handleRelaxedException(action, + "ParentNotDirectoryException", + e); + } + } + + protected void expectMkdirsUnderFileFails(String action, + Path file, Path descendant) + throws Exception { + createFile(file); + try { + // now mkdirs + mkdirs(descendant); + } catch (FileAlreadyExistsException | ParentNotDirectoryException ex) { + //expected + handleExpectedException(ex); + } catch (IOException e) { + handleRelaxedException(action, + "ParentNotDirectoryException", + e); + } + } + + private void createFile(Path path) throws IOException { + byte[] data = dataset(256, 'a', 'z'); + FileSystem fs = getFileSystem(); + writeDataset(fs, path, data, data.length, 1024 * 1024, + true); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java index 6809fb339b562..328c8e1377904 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java @@ -86,7 +86,7 @@ public void testDeleteNonEmptyDirNonRecursive() throws Throwable { @Test public void testDeleteNonEmptyDirRecursive() throws Throwable { - Path path = path("testDeleteNonEmptyDirNonRecursive"); + Path path = path("testDeleteNonEmptyDirRecursive"); mkdirs(path); Path file = new Path(path, "childfile"); ContractTestUtils.writeTextFile(getFileSystem(), file, "goodbye, world", diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java index 85bd137813f66..f63314d39292e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java @@ -279,6 +279,14 @@ public void testListFilesNoDir() throws Throwable { } } + @Test + public void testListStatusIteratorNoDir() throws Throwable { + describe("test the listStatusIterator call on a path which is not " + + "present"); + intercept(FileNotFoundException.class, + () -> getFileSystem().listStatusIterator(path("missing"))); + } + @Test public void testLocatedStatusNoDir() throws Throwable { describe("test the LocatedStatus call on a path which is not present"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java index b6e94a664165e..a43053180fbf8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java @@ -281,6 +281,7 @@ public void testOpenFileApplyRead() throws Throwable { createFile(fs, path, true, dataset(len, 0x40, 0x80)); CompletableFuture readAllBytes = fs.openFile(path) + .withFileStatus(fs.getFileStatus(path)) .build() .thenApply(ContractTestUtils::readStream); assertEquals("Wrong number of bytes read value", @@ -302,4 +303,12 @@ public void testOpenFileApplyAsyncRead() throws Throwable { accepted.get()); } + @Test + public void testOpenFileNullStatus() throws Throwable { + describe("use openFile() with a null status"); + Path path = path("testOpenFileNullStatus"); + intercept(NullPointerException.class, + () -> getFileSystem().openFile(path).withFileStatus(null)); + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java index 2751294beb92c..78ff2541483a3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java @@ -29,10 +29,10 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.*; /** - * Test creating files, overwrite options &c + * Test renaming files. */ public abstract class AbstractContractRenameTest extends - AbstractFSContractTestBase { + AbstractFSContractTestBase { @Test public void testRenameNewFileSameDir() throws Throwable { @@ -83,7 +83,8 @@ public void testRenameNonexistentFile() throws Throwable { "FileNotFoundException", e); } - assertPathDoesNotExist("rename nonexistent file created a destination file", target); + assertPathDoesNotExist("rename nonexistent file created a destination file", + target); } /** @@ -112,7 +113,7 @@ public void testRenameFileOverExistingFile() throws Throwable { // the filesystem supports rename(file, file2) by overwriting file2 assertTrue("Rename returned false", renamed); - destUnchanged = false; + destUnchanged = false; } else { // rename is rejected by returning 'false' or throwing an exception if (renamed && !renameReturnsFalseOnRenameDestExists) { @@ -129,12 +130,13 @@ public void testRenameFileOverExistingFile() throws Throwable { // verify that the destination file is as expected based on the expected // outcome verifyFileContents(getFileSystem(), destFile, - destUnchanged? destData: srcData); + destUnchanged ? 
destData: srcData); } @Test public void testRenameDirIntoExistingDir() throws Throwable { - describe("Verify renaming a dir into an existing dir puts it underneath" + describe("Verify renaming a dir into an existing dir puts it" + + " underneath" +" and leaves existing files alone"); FileSystem fs = getFileSystem(); String sourceSubdir = "source"; @@ -145,15 +147,15 @@ public void testRenameDirIntoExistingDir() throws Throwable { Path destDir = path("dest"); Path destFilePath = new Path(destDir, "dest-512.txt"); - byte[] destDateset = dataset(512, 'A', 'Z'); - writeDataset(fs, destFilePath, destDateset, destDateset.length, 1024, false); + byte[] destData = dataset(512, 'A', 'Z'); + writeDataset(fs, destFilePath, destData, destData.length, 1024, false); assertIsFile(destFilePath); boolean rename = rename(srcDir, destDir); Path renamedSrc = new Path(destDir, sourceSubdir); assertIsFile(destFilePath); assertIsDirectory(renamedSrc); - verifyFileContents(fs, destFilePath, destDateset); + verifyFileContents(fs, destFilePath, destData); assertTrue("rename returned false though the contents were copied", rename); } @@ -204,7 +206,8 @@ public void testRenameWithNonEmptySubDir() throws Throwable { assertPathExists("not created in src/sub dir", new Path(srcSubDir, "subfile.txt")); - fs.rename(srcDir, finalDir); + rename(srcDir, finalDir); + // Accept both POSIX rename behavior and CLI rename behavior if (renameRemoveEmptyDest) { // POSIX rename behavior @@ -285,4 +288,54 @@ protected void validateAncestorsMoved(Path src, Path dst, String nestedPath) } } + @Test + public void testRenameFileUnderFile() throws Exception { + String action = "rename directly under file"; + describe(action); + Path base = methodPath(); + Path grandparent = new Path(base, "file"); + expectRenameUnderFileFails(action, + grandparent, + new Path(base, "testRenameSrc"), + new Path(grandparent, "testRenameTarget")); + } + + @Test + public void testRenameFileUnderFileSubdir() throws Exception { + String action = "rename directly under file/subdir"; + describe(action); + Path base = methodPath(); + Path grandparent = new Path(base, "file"); + Path parent = new Path(grandparent, "parent"); + expectRenameUnderFileFails(action, + grandparent, + new Path(base, "testRenameSrc"), + new Path(parent, "testRenameTarget")); + } + + protected void expectRenameUnderFileFails(String action, + Path file, Path renameSrc, Path renameTarget) + throws Exception { + byte[] data = dataset(256, 'a', 'z'); + FileSystem fs = getFileSystem(); + writeDataset(fs, file, data, data.length, 1024 * 1024, + true); + writeDataset(fs, renameSrc, data, data.length, 1024 * 1024, + true); + String outcome; + boolean renamed; + try { + renamed = rename(renameSrc, renameTarget); + outcome = action + ": rename (" + renameSrc + ", " + renameTarget + + ")= " + renamed; + } catch (IOException e) { + // raw local raises an exception here + renamed = false; + outcome = "rename raised an exception: " + e; + } + assertPathDoesNotExist("after " + outcome, renameTarget); + assertFalse(outcome, renamed); + assertPathExists(action, renameSrc); + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java index 7ba32bafa552b..5eb1e892f83d5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java @@ -18,12 +18,13 @@ package org.apache.hadoop.fs.contract; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.Path; - import org.junit.Test; import java.io.IOException; +import java.util.Arrays; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.Path; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; @@ -34,21 +35,22 @@ public abstract class AbstractContractUnbufferTest extends AbstractFSContractTestBase { private Path file; + private byte[] fileBytes; @Override public void setup() throws Exception { super.setup(); skipIfUnsupported(SUPPORTS_UNBUFFER); file = path("unbufferFile"); - createFile(getFileSystem(), file, true, - dataset(TEST_FILE_LEN, 0, 255)); + fileBytes = dataset(TEST_FILE_LEN, 0, 255); + createFile(getFileSystem(), file, true, fileBytes); } @Test public void testUnbufferAfterRead() throws IOException { describe("unbuffer a file after a single read"); try (FSDataInputStream stream = getFileSystem().open(file)) { - assertEquals(128, stream.read(new byte[128])); + validateFullFileContents(stream); unbuffer(stream); } } @@ -58,15 +60,14 @@ public void testUnbufferBeforeRead() throws IOException { describe("unbuffer a file before a read"); try (FSDataInputStream stream = getFileSystem().open(file)) { unbuffer(stream); - assertEquals(128, stream.read(new byte[128])); + validateFullFileContents(stream); } } @Test public void testUnbufferEmptyFile() throws IOException { Path emptyFile = path("emptyUnbufferFile"); - createFile(getFileSystem(), emptyFile, true, - dataset(TEST_FILE_LEN, 0, 255)); + getFileSystem().create(emptyFile, true).close(); describe("unbuffer an empty file"); try (FSDataInputStream stream = getFileSystem().open(emptyFile)) { unbuffer(stream); @@ -79,13 +80,15 @@ public void testUnbufferOnClosedFile() throws IOException { FSDataInputStream stream = null; try { stream = getFileSystem().open(file); - assertEquals(128, stream.read(new byte[128])); + validateFullFileContents(stream); } finally { if (stream != null) { stream.close(); } } - unbuffer(stream); + if (stream != null) { + unbuffer(stream); + } } @Test @@ -94,32 +97,58 @@ public void testMultipleUnbuffers() throws IOException { try (FSDataInputStream stream = getFileSystem().open(file)) { unbuffer(stream); unbuffer(stream); - assertEquals(128, stream.read(new byte[128])); + validateFullFileContents(stream); unbuffer(stream); unbuffer(stream); } } - @Test + @Test public void testUnbufferMultipleReads() throws IOException { describe("unbuffer a file multiple times"); try (FSDataInputStream stream = getFileSystem().open(file)) { unbuffer(stream); - assertEquals(128, stream.read(new byte[128])); + validateFileContents(stream, TEST_FILE_LEN / 8, 0); unbuffer(stream); - assertEquals(128, stream.read(new byte[128])); - assertEquals(128, stream.read(new byte[128])); + validateFileContents(stream, TEST_FILE_LEN / 8, TEST_FILE_LEN / 8); + validateFileContents(stream, TEST_FILE_LEN / 4, TEST_FILE_LEN / 4); unbuffer(stream); - assertEquals(128, stream.read(new byte[128])); - assertEquals(128, stream.read(new byte[128])); - assertEquals(128, stream.read(new byte[128])); + validateFileContents(stream, TEST_FILE_LEN / 2, TEST_FILE_LEN / 2); unbuffer(stream); + assertEquals("stream should be at end of file", TEST_FILE_LEN, + stream.getPos()); } } private void 
unbuffer(FSDataInputStream stream) throws IOException { long pos = stream.getPos(); stream.unbuffer(); - assertEquals(pos, stream.getPos()); + assertEquals("unbuffer unexpectedly changed the stream position", pos, + stream.getPos()); + } + + protected void validateFullFileContents(FSDataInputStream stream) + throws IOException { + validateFileContents(stream, TEST_FILE_LEN, 0); + } + + protected void validateFileContents(FSDataInputStream stream, int length, + int startIndex) + throws IOException { + byte[] streamData = new byte[length]; + assertEquals("failed to read expected number of bytes from " + + "stream", length, stream.read(streamData)); + byte[] validateFileBytes; + if (startIndex == 0 && length == fileBytes.length) { + validateFileBytes = fileBytes; + } else { + validateFileBytes = Arrays.copyOfRange(fileBytes, startIndex, + startIndex + length); + } + assertArrayEquals("invalid file contents", validateFileBytes, streamData); + } + + protected Path getFile() { + return file; } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java index f09496a6082c8..76d3116c3abdc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java @@ -69,6 +69,14 @@ public void init() throws IOException { } + /** + * Any teardown logic can go here. + * @throws IOException IO problems + */ + public void teardown() throws IOException { + + } + /** * Add a configuration resource to this instance's configuration * @param resource resource reference @@ -113,7 +121,7 @@ public FileSystem getFileSystem(URI uri) throws IOException { public abstract FileSystem getTestFileSystem() throws IOException; /** - * Get the scheme of this FS + * Get the scheme of this FS. * @return the scheme this FS supports */ public abstract String getScheme(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java index 1cd2164fad300..ac9de6d7bfe8c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java @@ -82,6 +82,15 @@ public static void nameTestThread() { Thread.currentThread().setName("JUnit"); } + @Before + public void nameThread() { + Thread.currentThread().setName("JUnit-" + getMethodName()); + } + + protected String getMethodName() { + return methodName.getMethodName(); + } + /** * This must be implemented by all instantiated test cases. 
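Looking back at the unbuffer contract tests above, the client-side pattern they exercise is simply the following (a hedged sketch; it assumes a filesystem whose streams support CanUnbuffer, with fs and path provided by the caller):

    try (FSDataInputStream in = fs.open(path)) {
      byte[] buf = new byte[4096];
      in.read(buf);
      in.unbuffer();   // release buffers/sockets while keeping the stream open
      in.read(buf);    // later reads transparently re-acquire resources
    }
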
* -provide the FS contract @@ -172,6 +181,7 @@ protected int getTestTimeoutMillis() { */ @Before public void setup() throws Exception { + Thread.currentThread().setName("setup"); LOG.debug("== Setup =="); contract = createContract(createConfiguration()); contract.init(); @@ -200,8 +210,12 @@ public void setup() throws Exception { */ @After public void teardown() throws Exception { + Thread.currentThread().setName("teardown"); LOG.debug("== Teardown =="); deleteTestDirInTeardown(); + if (contract != null) { + contract.teardown(); + } LOG.debug("== Teardown complete =="); } @@ -225,6 +239,15 @@ protected Path path(String filepath) throws IOException { new Path(getContract().getTestPath(), filepath)); } + /** + * Get a path whose name ends with the name of this method. + * @return a path implicitly unique amongst all methods in this class + * @throws IOException IO problems + */ + protected Path methodPath() throws IOException { + return path(methodName.getMethodName()); + } + /** * Take a simple path like "/something" and turn it into * a qualified path against the test FS. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java index 91a112141e987..3f31c07742c59 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java @@ -51,6 +51,15 @@ public interface ContractOptions { */ String CREATE_VISIBILITY_DELAYED = "create-visibility-delayed"; + /** + * Flag to indicate that it is possible to create a file under a file. + * This is a complete violation of the filesystem rules, but it is one + * which object stores have been known to do for performance + * and because nobody has ever noticed. + * {@value} + */ + String CREATE_FILE_UNDER_FILE_ALLOWED = "create-file-under-file-allowed"; + /** * Is a filesystem case sensitive. 
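The new CREATE_FILE_UNDER_FILE_ALLOWED constant above follows the existing contract-option convention: it is resolved under the "fs.contract." prefix of the per-filesystem contract resource. A hedged sketch of the programmatic equivalent for a store that tolerates file-under-file creation (normally this lives in the store's contract XML):

    Configuration conf = new Configuration(false);
    // key = "fs.contract." + CREATE_FILE_UNDER_FILE_ALLOWED
    conf.setBoolean("fs.contract.create-file-under-file-allowed", true);
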
* Some of the filesystems that say "no" here may mean diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java index f61634943bb7f..4789630f95f1c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java @@ -418,8 +418,9 @@ public static boolean rm(FileSystem fileSystem, public static void rename(FileSystem fileSystem, Path src, Path dst) throws IOException { rejectRootOperation(src, false); - assertTrue(fileSystem.rename(src, dst)); - assertPathDoesNotExist(fileSystem, "renamed", src); + assertTrue("rename(" + src + ", " + dst + ") failed", + fileSystem.rename(src, dst)); + assertPathDoesNotExist(fileSystem, "renamed source dir", src); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/SFTPContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/SFTPContract.java new file mode 100644 index 0000000000000..f72a2aec86242 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/SFTPContract.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.contract.sftp; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.fs.sftp.SFTPFileSystem; +import org.apache.sshd.common.NamedFactory; +import org.apache.sshd.server.SshServer; +import org.apache.sshd.server.auth.UserAuth; +import org.apache.sshd.server.auth.password.UserAuthPasswordFactory; +import org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider; +import org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory; + +public class SFTPContract extends AbstractFSContract { + + private static final String CONTRACT_XML = "contract/sftp.xml"; + private static final URI TEST_URI = + URI.create("sftp://user:password@localhost"); + private final String testDataDir = + new FileSystemTestHelper().getTestRootDir(); + private final Configuration conf; + private SshServer sshd; + + public SFTPContract(Configuration conf) { + super(conf); + addConfResource(CONTRACT_XML); + this.conf = conf; + } + + @Override + public void init() throws IOException { + sshd = SshServer.setUpDefaultServer(); + // ask OS to assign a port + sshd.setPort(0); + sshd.setKeyPairProvider(new SimpleGeneratorHostKeyProvider()); + + List> userAuthFactories = new ArrayList<>(); + userAuthFactories.add(new UserAuthPasswordFactory()); + + sshd.setUserAuthFactories(userAuthFactories); + sshd.setPasswordAuthenticator((username, password, session) -> + username.equals("user") && password.equals("password") + ); + + sshd.setSubsystemFactories( + Collections.singletonList(new SftpSubsystemFactory())); + + sshd.start(); + int port = sshd.getPort(); + + conf.setClass("fs.sftp.impl", SFTPFileSystem.class, FileSystem.class); + conf.setInt("fs.sftp.host.port", port); + conf.setBoolean("fs.sftp.impl.disable.cache", true); + } + + @Override + public void teardown() throws IOException { + if (sshd != null) { + sshd.stop(); + } + } + + @Override + public FileSystem getTestFileSystem() throws IOException { + return FileSystem.get(TEST_URI, conf); + } + + @Override + public String getScheme() { + return "sftp"; + } + + @Override + public Path getTestPath() { + try { + FileSystem fs = FileSystem.get( + URI.create("sftp://user:password@localhost"), conf + ); + return fs.makeQualified(new Path(testDataDir)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/TestSFTPContractSeek.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/TestSFTPContractSeek.java new file mode 100644 index 0000000000000..20f4116b98019 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/TestSFTPContractSeek.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.contract.sftp; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractSeekTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; + +public class TestSFTPContractSeek extends AbstractContractSeekTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new SFTPContract(conf); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/FtpTestServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/FtpTestServer.java new file mode 100644 index 0000000000000..eca26dea5b39b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/FtpTestServer.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ftp; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; + +import org.apache.ftpserver.FtpServer; +import org.apache.ftpserver.FtpServerFactory; +import org.apache.ftpserver.ftplet.Authority; +import org.apache.ftpserver.ftplet.FtpException; +import org.apache.ftpserver.ftplet.UserManager; +import org.apache.ftpserver.impl.DefaultFtpServer; +import org.apache.ftpserver.listener.Listener; +import org.apache.ftpserver.listener.ListenerFactory; +import org.apache.ftpserver.usermanager.PropertiesUserManagerFactory; +import org.apache.ftpserver.usermanager.impl.BaseUser; + +/** + * Helper class facilitating to manage a local ftp + * server for unit tests purposes only. 
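Typical usage of this helper, mirroring the TestFTPFileSystem setup further below (a sketch; tmpDir stands in for a temporary directory created by the test):

    FtpTestServer server = new FtpTestServer(tmpDir).start();
    BaseUser user = server.addUser("test", "password", new WritePermission());
    Configuration conf = new Configuration();
    conf.set("fs.ftp.host", "localhost");
    conf.setInt("fs.ftp.host.port", server.getPort());
    conf.set("fs.ftp.user.localhost", user.getName());
    conf.set("fs.ftp.password.localhost", user.getPassword());
    // ... exercise the FTPFileSystem, then:
    server.stop();
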
+ */ +public class FtpTestServer { + + private int port; + private Path ftpRoot; + private UserManager userManager; + private FtpServer server; + + public FtpTestServer(Path ftpRoot) { + this.ftpRoot = ftpRoot; + this.userManager = new PropertiesUserManagerFactory().createUserManager(); + FtpServerFactory serverFactory = createServerFactory(); + serverFactory.setUserManager(userManager); + this.server = serverFactory.createServer(); + } + + public FtpTestServer start() throws Exception { + server.start(); + Listener listener = ((DefaultFtpServer) server) + .getListeners() + .get("default"); + port = listener.getPort(); + return this; + } + + public Path getFtpRoot() { + return ftpRoot; + } + + public int getPort() { + return port; + } + + public void stop() { + if (!server.isStopped()) { + server.stop(); + } + } + + public BaseUser addUser(String name, String password, + Authority... authorities) throws IOException, FtpException { + + BaseUser user = new BaseUser(); + user.setName(name); + user.setPassword(password); + Path userHome = Files.createDirectory(ftpRoot.resolve(name)); + user.setHomeDirectory(userHome.toString()); + user.setAuthorities(Arrays.asList(authorities)); + userManager.save(user); + return user; + } + + private FtpServerFactory createServerFactory() { + FtpServerFactory serverFactory = new FtpServerFactory(); + ListenerFactory defaultListener = new ListenerFactory(); + defaultListener.setPort(0); + serverFactory.addListener("default", defaultListener.createListener()); + return serverFactory; + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java index 3d41ccb91d6c4..d3750e64469b2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java @@ -17,18 +17,35 @@ */ package org.apache.hadoop.fs.ftp; +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.Comparator; + import com.google.common.base.Preconditions; import org.apache.commons.net.ftp.FTP; - import org.apache.commons.net.ftp.FTPClient; import org.apache.commons.net.ftp.FTPFile; +import org.apache.ftpserver.usermanager.impl.BaseUser; +import org.apache.ftpserver.usermanager.impl.WritePermission; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.After; +import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; - +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; /** @@ -37,9 +54,75 @@ */ public class TestFTPFileSystem { + private FtpTestServer server; + private java.nio.file.Path testDir; @Rule public Timeout testTimeout = new Timeout(180000); + @Before + public void setUp() throws Exception { + testDir = Files.createTempDirectory( + GenericTestUtils.getTestDir().toPath(), getClass().getName() + ); + server = new 
FtpTestServer(testDir).start(); + } + + @After + @SuppressWarnings("ResultOfMethodCallIgnored") + public void tearDown() throws Exception { + if (server != null) { + server.stop(); + Files.walk(testDir) + .sorted(Comparator.reverseOrder()) + .map(java.nio.file.Path::toFile) + .forEach(File::delete); + } + } + + @Test + public void testCreateWithWritePermissions() throws Exception { + BaseUser user = server.addUser("test", "password", new WritePermission()); + Configuration configuration = new Configuration(); + configuration.set("fs.defaultFS", "ftp:///"); + configuration.set("fs.ftp.host", "localhost"); + configuration.setInt("fs.ftp.host.port", server.getPort()); + configuration.set("fs.ftp.user.localhost", user.getName()); + configuration.set("fs.ftp.password.localhost", user.getPassword()); + configuration.setBoolean("fs.ftp.impl.disable.cache", true); + + FileSystem fs = FileSystem.get(configuration); + byte[] bytesExpected = "hello world".getBytes(StandardCharsets.UTF_8); + try (FSDataOutputStream outputStream = fs.create(new Path("test1.txt"))) { + outputStream.write(bytesExpected); + } + try (FSDataInputStream input = fs.open(new Path("test1.txt"))) { + assertThat(bytesExpected, equalTo(IOUtils.readFullyToByteArray(input))); + } + } + + @Test + public void testCreateWithoutWritePermissions() throws Exception { + BaseUser user = server.addUser("test", "password"); + Configuration configuration = new Configuration(); + configuration.set("fs.defaultFS", "ftp:///"); + configuration.set("fs.ftp.host", "localhost"); + configuration.setInt("fs.ftp.host.port", server.getPort()); + configuration.set("fs.ftp.user.localhost", user.getName()); + configuration.set("fs.ftp.password.localhost", user.getPassword()); + configuration.setBoolean("fs.ftp.impl.disable.cache", true); + + FileSystem fs = FileSystem.get(configuration); + byte[] bytesExpected = "hello world".getBytes(StandardCharsets.UTF_8); + LambdaTestUtils.intercept( + IOException.class, "Unable to create file: test1.txt, Aborting", + () -> { + try (FSDataOutputStream out = fs.create(new Path("test1.txt"))) { + out.write(bytesExpected); + } + } + ); + } + @Test public void testFTPDefaultPort() throws Exception { FTPFileSystem ftp = new FTPFileSystem(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java index b74e75d9ef73d..4b3bd2f94075c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java @@ -31,6 +31,7 @@ import java.util.Arrays; import java.util.EnumSet; import java.util.Random; +import java.util.UUID; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -295,7 +296,8 @@ private void read() throws IOException { */ private void write() throws IOException { String dirName = dirs.get(r.nextInt(dirs.size())); - Path file = new Path(dirName, hostname+id); + Path file = + new Path(dirName, hostname + id + UUID.randomUUID().toString()); double fileSize = 0; while ((fileSize = r.nextGaussian()+2)<=0) {} genFile(file, (long)(fileSize*BLOCK_SIZE)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopy.java index 
f73e83d858bc7..9172f85eb9cb7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopy.java @@ -121,7 +121,7 @@ public void testInterruptedCreate() throws Exception { tryCopyStream(in, false); verify(mockFs, never()).rename(any(Path.class), any(Path.class)); - verify(mockFs, never()).delete(eq(tmpPath), anyBoolean()); + verify(mockFs).delete(eq(tmpPath), anyBoolean()); verify(mockFs, never()).delete(eq(path), anyBoolean()); verify(mockFs, never()).close(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java index b5adfcf76157c..f101fed26bbf8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java @@ -411,6 +411,25 @@ public void processPathWithQuotasByMultipleStorageTypes() throws Exception { verifyNoMoreInteractions(out); } + @Test + public void processPathWithSnapshotHeader() throws Exception { + Path path = new Path("mockfs:/test"); + when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat); + PrintStream out = mock(PrintStream.class); + Count count = new Count(); + count.out = out; + LinkedList options = new LinkedList(); + options.add("-s"); + options.add("-v"); + options.add("dummy"); + count.processOptions(options); + String withSnapshotHeader = " DIR_COUNT FILE_COUNT CONTENT_SIZE " + + " SNAPSHOT_LENGTH SNAPSHOT_FILE_COUNT " + + " SNAPSHOT_DIR_COUNT SNAPSHOT_SPACE_CONSUMED PATHNAME"; + verify(out).println(withSnapshotHeader); + verifyNoMoreInteractions(out); + } + @Test public void getCommandName() { Count count = new Count(); @@ -448,7 +467,8 @@ public void getUsage() { Count count = new Count(); String actual = count.getUsage(); String expected = - "-count [-q] [-h] [-v] [-t []] [-u] [-x] [-e] ..."; + "-count [-q] [-h] [-v] [-t []]" + + " [-u] [-x] [-e] [-s] ..."; assertEquals("Count.getUsage", expected, actual); } @@ -480,7 +500,8 @@ public void getDescription() { + "storage types.\n" + "The -u option shows the quota and \n" + "the usage against the quota without the detailed content summary." - + "The -e option shows the erasure coding policy."; + + "The -e option shows the erasure coding policy." 
+ + "The -s option shows snapshot counts."; assertEquals("Count.getDescription", expected, actual); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java index 1f379448ee86c..b9e87d3dacefe 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.PathExistsException; +import org.apache.hadoop.fs.shell.CommandFormat.UnknownOptionException; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -93,6 +94,12 @@ public void testMoveTargetExistsWithoutExplicitRename() throws Exception { assertTrue("Rename should have failed with path exists exception", cmd.error instanceof PathExistsException); } + + @Test(expected = UnknownOptionException.class) + public void testMoveFromLocalDoesNotAllowTOption() { + new MoveCommands.MoveFromLocal().run("-t", "2", + null, null); + } static class MockFileSystem extends FilterFileSystem { Configuration conf; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestHCFSMountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestHCFSMountTableConfigLoader.java new file mode 100644 index 0000000000000..bf7a6e32c8e93 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestHCFSMountTableConfigLoader.java @@ -0,0 +1,165 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests the mount table loading. 
+ */ +public class TestHCFSMountTableConfigLoader { + + private static final String DOT = "."; + + private static final String TARGET_TWO = "/tar2"; + + private static final String TARGET_ONE = "/tar1"; + + private static final String SRC_TWO = "/src2"; + + private static final String SRC_ONE = "/src1"; + + private static final String TABLE_NAME = "test"; + + private MountTableConfigLoader loader = new HCFSMountTableConfigLoader(); + + private static FileSystem fsTarget; + private static Configuration conf; + private static Path targetTestRoot; + private static FileSystemTestHelper fileSystemTestHelper = + new FileSystemTestHelper(); + private static File oldVersionMountTableFile; + private static File newVersionMountTableFile; + private static final String MOUNT_LINK_KEY_SRC_ONE = + new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(DOT) + .append(TABLE_NAME).append(DOT).append(Constants.CONFIG_VIEWFS_LINK) + .append(DOT).append(SRC_ONE).toString(); + private static final String MOUNT_LINK_KEY_SRC_TWO = + new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(DOT) + .append(TABLE_NAME).append(DOT).append(Constants.CONFIG_VIEWFS_LINK) + .append(DOT).append(SRC_TWO).toString(); + + @BeforeClass + public static void init() throws Exception { + fsTarget = new LocalFileSystem(); + fsTarget.initialize(new URI("file:///"), new Configuration()); + targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget); + fsTarget.delete(targetTestRoot, true); + fsTarget.mkdirs(targetTestRoot); + } + + @Before + public void setUp() throws Exception { + conf = new Configuration(); + conf.set(String.format( + FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, "file"), + LocalFileSystem.class.getName()); + oldVersionMountTableFile = + new File(new URI(targetTestRoot.toString() + "/table.1.xml")); + oldVersionMountTableFile.createNewFile(); + newVersionMountTableFile = + new File(new URI(targetTestRoot.toString() + "/table.2.xml")); + newVersionMountTableFile.createNewFile(); + } + + @Test + public void testMountTableFileLoadingWhenMultipleFilesExist() + throws Exception { + ViewFsTestSetup.addMountLinksToFile(TABLE_NAME, + new String[] {SRC_ONE, SRC_TWO }, new String[] {TARGET_ONE, + TARGET_TWO }, + new Path(newVersionMountTableFile.toURI()), conf); + loader.load(targetTestRoot.toString(), conf); + Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_TWO), TARGET_TWO); + Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_ONE), TARGET_ONE); + } + + @Test + public void testMountTableFileWithInvalidFormat() throws Exception { + Path path = new Path(new URI( + targetTestRoot.toString() + "/testMountTableFileWithInvalidFormat/")); + fsTarget.mkdirs(path); + File invalidMountFileName = + new File(new URI(path.toString() + "/table.InvalidVersion.xml")); + invalidMountFileName.createNewFile(); + // Adding mount links to make sure it will not read it. 
+ ViewFsTestSetup.addMountLinksToFile(TABLE_NAME, + new String[] {SRC_ONE, SRC_TWO }, new String[] {TARGET_ONE, + TARGET_TWO }, + new Path(invalidMountFileName.toURI()), conf); + // Pass mount table directory + loader.load(path.toString(), conf); + Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_TWO)); + Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_ONE)); + invalidMountFileName.delete(); + } + + @Test + public void testMountTableFileWithInvalidFormatWithNoDotsInName() + throws Exception { + Path path = new Path(new URI(targetTestRoot.toString() + + "/testMountTableFileWithInvalidFormatWithNoDots/")); + fsTarget.mkdirs(path); + File invalidMountFileName = + new File(new URI(path.toString() + "/tableInvalidVersionxml")); + invalidMountFileName.createNewFile(); + // Pass mount table directory + loader.load(path.toString(), conf); + Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_TWO)); + Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_ONE)); + invalidMountFileName.delete(); + } + + @Test(expected = FileNotFoundException.class) + public void testLoadWithMountFile() throws Exception { + loader.load(new URI(targetTestRoot.toString() + "/Non-Existent-File.xml") + .toString(), conf); + } + + @Test + public void testLoadWithNonExistentMountFile() throws Exception { + ViewFsTestSetup.addMountLinksToFile(TABLE_NAME, + new String[] {SRC_ONE, SRC_TWO }, + new String[] {TARGET_ONE, TARGET_TWO }, + new Path(oldVersionMountTableFile.toURI()), conf); + loader.load(oldVersionMountTableFile.toURI().toString(), conf); + Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_TWO), TARGET_TWO); + Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_ONE), TARGET_ONE); + } + + @AfterClass + public static void tearDown() throws IOException { + fsTarget.delete(targetTestRoot, true); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeCentralMountTableConfig.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeCentralMountTableConfig.java new file mode 100644 index 0000000000000..1527e3c1f30d8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeCentralMountTableConfig.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.viewfs; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.junit.Before; + +/** + * Test the TestViewFSOverloadSchemeCentralMountTableConfig with mount-table + * configuration files in configured fs location. + */ +public class TestViewFSOverloadSchemeCentralMountTableConfig + extends TestViewFileSystemOverloadSchemeLocalFileSystem { + private Path oldMountTablePath; + private Path latestMountTablepath; + + @Before + public void setUp() throws Exception { + super.setUp(); + // Mount table name format: mount-table..xml + String mountTableFileName1 = "mount-table.1.xml"; + String mountTableFileName2 = "mount-table.2.xml"; + oldMountTablePath = + new Path(getTestRoot() + File.separator + mountTableFileName1); + latestMountTablepath = + new Path(getTestRoot() + File.separator + mountTableFileName2); + getConf().set(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH, + getTestRoot().toString()); + File f = new File(oldMountTablePath.toUri()); + f.createNewFile(); // Just creating empty mount-table file. + File f2 = new File(latestMountTablepath.toUri()); + latestMountTablepath = new Path(f2.toURI()); + f2.createNewFile(); + } + + /** + * This method saves the mount links in a local files. + */ + @Override + void addMountLinks(String mountTable, String[] sources, String[] targets, + Configuration conf) throws IOException, URISyntaxException { + // we don't use conf here, instead we use config paths to store links. + // Mount-table old version file mount-table-.xml + try (BufferedWriter out = new BufferedWriter( + new FileWriter(new File(oldMountTablePath.toUri())))) { + out.write("\n"); + // Invalid tag. This file should not be read. + out.write(""); + out.write("\n"); + out.flush(); + } + ViewFsTestSetup.addMountLinksToFile(mountTable, sources, targets, + latestMountTablepath, conf); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java new file mode 100644 index 0000000000000..ac7a1a6899425 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java @@ -0,0 +1,174 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * + * Test the TestViewFileSystemOverloadSchemeLF using a file with authority: + * file://mountTableName/ i.e, the authority is used to load a mount table. + */ +public class TestViewFileSystemOverloadSchemeLocalFileSystem { + private static final String FILE = "file"; + private static final Log LOG = + LogFactory.getLog(TestViewFileSystemOverloadSchemeLocalFileSystem.class); + private FileSystem fsTarget; + private Configuration conf; + private Path targetTestRoot; + private FileSystemTestHelper fileSystemTestHelper = + new FileSystemTestHelper(); + + @Before + public void setUp() throws Exception { + conf = new Configuration(); + conf.set(String.format("fs.%s.impl", FILE), + ViewFileSystemOverloadScheme.class.getName()); + conf.set(String.format( + FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, FILE), + LocalFileSystem.class.getName()); + fsTarget = new LocalFileSystem(); + fsTarget.initialize(new URI("file:///"), conf); + // create the test root on local_fs + targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget); + fsTarget.delete(targetTestRoot, true); + fsTarget.mkdirs(targetTestRoot); + } + + /** + * Adds the given mount links to config. sources contains mount link src and + * the respective index location in targets contains the target uri. + */ + void addMountLinks(String mountTable, String[] sources, String[] targets, + Configuration config) throws IOException, URISyntaxException { + ViewFsTestSetup.addMountLinksToConf(mountTable, sources, targets, config); + } + + /** + * Tests write file and read file with ViewFileSystemOverloadScheme. + */ + @Test + public void testLocalTargetLinkWriteSimple() + throws IOException, URISyntaxException { + LOG.info("Starting testLocalTargetLinkWriteSimple"); + final String testString = "Hello Local!..."; + final Path lfsRoot = new Path("/lfsRoot"); + addMountLinks(null, new String[] {lfsRoot.toString() }, + new String[] {targetTestRoot + "/local" }, conf); + try (FileSystem lViewFs = FileSystem.get(URI.create("file:///"), conf)) { + final Path testPath = new Path(lfsRoot, "test.txt"); + try (FSDataOutputStream fsDos = lViewFs.create(testPath)) { + fsDos.writeUTF(testString); + } + + try (FSDataInputStream lViewIs = lViewFs.open(testPath)) { + Assert.assertEquals(testString, lViewIs.readUTF()); + } + } + } + + /** + * Tests create file and delete file with ViewFileSystemOverloadScheme. 
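A condensed sketch of the overload-scheme convention these tests rely on (the scheme "file", the mount-table name "mt", and targetDir are just the values used in this test class; they are not fixed by the API):

    // route the "file" scheme through ViewFileSystemOverloadScheme,
    // backed by LocalFileSystem as the real target implementation
    conf.set("fs.file.impl", ViewFileSystemOverloadScheme.class.getName());
    conf.set(String.format(
        FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, "file"),
        LocalFileSystem.class.getName());
    ConfigUtil.addLink(conf, "mt", "/lfsroot", new URI(targetDir + "/wd2"));
    // the URI authority ("mt") selects the mount table to use
    FileSystem viewFs = FileSystem.get(URI.create("file://mt/"), conf);
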
+ */ + @Test + public void testLocalFsCreateAndDelete() throws Exception { + LOG.info("Starting testLocalFsCreateAndDelete"); + addMountLinks("mt", new String[] {"/lfsroot" }, + new String[] {targetTestRoot + "/wd2" }, conf); + final URI mountURI = URI.create("file://mt/"); + try (FileSystem lViewFS = FileSystem.get(mountURI, conf)) { + Path testPath = new Path(mountURI.toString() + "/lfsroot/test"); + lViewFS.createNewFile(testPath); + Assert.assertTrue(lViewFS.exists(testPath)); + lViewFS.delete(testPath, true); + Assert.assertFalse(lViewFS.exists(testPath)); + } + } + + /** + * Tests root level file with linkMergeSlash with + * ViewFileSystemOverloadScheme. + */ + @Test + public void testLocalFsLinkSlashMerge() throws Exception { + LOG.info("Starting testLocalFsLinkSlashMerge"); + addMountLinks("mt", + new String[] {Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH }, + new String[] {targetTestRoot + "/wd2" }, conf); + final URI mountURI = URI.create("file://mt/"); + try (FileSystem lViewFS = FileSystem.get(mountURI, conf)) { + Path fileOnRoot = new Path(mountURI.toString() + "/NewFile"); + lViewFS.createNewFile(fileOnRoot); + Assert.assertTrue(lViewFS.exists(fileOnRoot)); + } + } + + /** + * Tests with linkMergeSlash and other mounts in + * ViewFileSystemOverloadScheme. + */ + @Test(expected = IOException.class) + public void testLocalFsLinkSlashMergeWithOtherMountLinks() throws Exception { + LOG.info("Starting testLocalFsLinkSlashMergeWithOtherMountLinks"); + addMountLinks("mt", + new String[] {"/lfsroot", Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH }, + new String[] {targetTestRoot + "/wd2", targetTestRoot + "/wd2" }, conf); + final URI mountURI = URI.create("file://mt/"); + FileSystem.get(mountURI, conf); + Assert.fail("A merge slash cannot be configured with other mount links."); + } + + @After + public void tearDown() throws Exception { + if (null != fsTarget) { + fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true); + fsTarget.close(); + } + } + + /** + * Returns the test root dir. + */ + public Path getTestRoot() { + return this.targetTestRoot; + } + + /** + * Returns the conf. + */ + public Configuration getConf() { + return this.conf; + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java index 4902d733e954b..59588a527f46e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java @@ -1279,7 +1279,8 @@ public void testLinkTarget() throws Exception { @Test public void testViewFileSystemInnerCache() throws Exception { - ViewFileSystem.InnerCache cache = new ViewFileSystem.InnerCache(); + ViewFileSystem.InnerCache cache = + new ViewFileSystem.InnerCache(new FsGetter()); FileSystem fs = cache.get(fsTarget.getUri(), conf); // InnerCache caches filesystem. 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java index 9b7e17f4a601a..efced73943ed5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java @@ -17,16 +17,21 @@ */ package org.apache.hadoop.fs.viewfs; +import java.io.IOException; import java.net.URI; +import java.net.URISyntaxException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileContextTestHelper; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.viewfs.ConfigUtil; +import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme.ChildFsGetter; import org.apache.hadoop.util.Shell; import org.eclipse.jetty.util.log.Log; +import org.junit.Assert; /** @@ -132,4 +137,84 @@ static void linkUpFirstComponents(Configuration conf, String path, + firstComponent + "->" + linkTarget); } + /** + * Adds the given mount links to the given Hadoop compatible file system path. + * Mount link mappings are in sources, targets at their respective index + * locations. + */ + static void addMountLinksToFile(String mountTable, String[] sources, + String[] targets, Path mountTableConfPath, Configuration conf) + throws IOException, URISyntaxException { + ChildFsGetter cfs = new ViewFileSystemOverloadScheme.ChildFsGetter( + mountTableConfPath.toUri().getScheme()); + try (FileSystem fs = cfs.getNewInstance(mountTableConfPath.toUri(), + conf)) { + try (FSDataOutputStream out = fs.create(mountTableConfPath)) { + String prefix = + new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(".") + .append((mountTable == null + ? Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE + : mountTable)) + .append(".").toString(); + out.writeBytes(""); + for (int i = 0; i < sources.length; i++) { + String src = sources[i]; + String target = targets[i]; + boolean isNfly = src.startsWith(Constants.CONFIG_VIEWFS_LINK_NFLY); + out.writeBytes(""); + if (isNfly) { + String[] srcParts = src.split("[.]"); + Assert.assertEquals("Invalid NFlyLink format", 3, srcParts.length); + String actualSrc = srcParts[srcParts.length - 1]; + String params = srcParts[srcParts.length - 2]; + out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_NFLY + "." + + params + "." + actualSrc); + } else if (Constants.CONFIG_VIEWFS_LINK_FALLBACK.equals(src)) { + out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_FALLBACK); + } else if (Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH.equals(src)) { + out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH); + } else { + out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK + "." + src); + } + out.writeBytes(""); + out.writeBytes(""); + out.writeBytes(target); + out.writeBytes(""); + out.flush(); + } + out.writeBytes(("")); + out.flush(); + } + } + } + + /** + * Adds the given mount links to the configuration. Mount link mappings are + * in sources, targets at their respective index locations. 
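For reference, the link variants handled by these helpers map to the following configuration key shapes (assuming the standard fs.viewfs.mounttable prefix; the nfly parameter syntax is taken from the parsing code above, and the hdfs://nn1 targets are placeholders):

    ConfigUtil.addLink(conf, "mt", "/data", new URI("hdfs://nn1/data"));
    //   -> fs.viewfs.mounttable.mt.link./data = hdfs://nn1/data
    ConfigUtil.addLinkFallback(conf, "mt", new URI("hdfs://nn1/"));
    //   -> fs.viewfs.mounttable.mt.linkFallback = hdfs://nn1/
    ConfigUtil.addLinkMergeSlash(conf, "mt", new URI("hdfs://nn1/"));
    //   -> fs.viewfs.mounttable.mt.linkMergeSlash = hdfs://nn1/
    // nfly links use: fs.viewfs.mounttable.mt.linkNfly.<params>.<src> = <target>
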
+ */ + public static void addMountLinksToConf(String mountTable, String[] sources, + String[] targets, Configuration config) throws URISyntaxException { + for (int i = 0; i < sources.length; i++) { + String src = sources[i]; + String target = targets[i]; + String mountTableName = mountTable == null ? + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE : mountTable; + boolean isNfly = src.startsWith(Constants.CONFIG_VIEWFS_LINK_NFLY); + if (isNfly) { + String[] srcParts = src.split("[.]"); + Assert.assertEquals("Invalid NFlyLink format", 3, srcParts.length); + String actualSrc = srcParts[srcParts.length - 1]; + String params = srcParts[srcParts.length - 2]; + ConfigUtil.addLinkNfly(config, mountTableName, actualSrc, params, + target); + } else if (src.equals(Constants.CONFIG_VIEWFS_LINK_FALLBACK)) { + ConfigUtil.addLinkFallback(config, mountTableName, new URI(target)); + } else if (src.equals(Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH)) { + ConfigUtil.addLinkMergeSlash(config, mountTableName, new URI(target)); + } else { + ConfigUtil.addLink(config, mountTableName, src, new URI(target)); + } + } + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java index 064527c3fed6d..6505fbb8224f8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java @@ -22,7 +22,7 @@ import java.net.InetSocketAddress; import java.util.ArrayList; -import com.google.protobuf.BlockingService; +import org.apache.hadoop.thirdparty.protobuf.BlockingService; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java index 0e59aa1004666..63b9c63646d8b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java @@ -83,11 +83,6 @@ public void testAdminUsage() throws Exception { assertOutputContains("transitionToActive: incorrect number of arguments"); assertEquals(-1, runTool("-transitionToActive", "x", "y")); assertOutputContains("transitionToActive: incorrect number of arguments"); - assertEquals(-1, runTool("-failover")); - assertOutputContains("failover: incorrect arguments"); - assertOutputContains("failover: incorrect arguments"); - assertEquals(-1, runTool("-failover", "foo:1234")); - assertOutputContains("failover: incorrect arguments"); } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java index cc1174b2d2c72..63c87830b4529 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java @@ -19,15 +19,22 @@ import static org.junit.Assert.*; +import java.net.InetSocketAddress; import java.security.NoSuchAlgorithmException; import com.google.common.base.Supplier; +import 
org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.ha.HealthMonitor.State; import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; +import org.apache.hadoop.security.authorize.Service; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Time; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooKeeper; @@ -128,6 +135,46 @@ public void testNoZK() throws Exception { runFC(svc)); } + @Test + public void testPolicyProviderForZKFCRpcServer() throws Exception { + Configuration myconf = new Configuration(); + myconf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, + true); + + DummyHAService dummyHAService = new DummyHAService(HAServiceState.ACTIVE, + new InetSocketAddress(0), false); + MiniZKFCCluster.DummyZKFC dummyZKFC = + new MiniZKFCCluster.DummyZKFC(myconf, dummyHAService); + + // initialize ZKFCRpcServer with null policy + LambdaTestUtils.intercept(HadoopIllegalArgumentException.class, + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION + + "is configured to true but service-level" + + "authorization security policy is null.", + () -> new ZKFCRpcServer(myconf, new InetSocketAddress(0), + dummyZKFC, null)); + + // initialize ZKFCRpcServer with dummy policy + PolicyProvider dummyPolicy = new PolicyProvider() { + private final Service[] services = new Service[] { + new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL, + ZKFCProtocol.class), + new Service( + CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY, + RefreshAuthorizationPolicyProtocol.class), + }; + @Override + public Service[] getServices() { + return this.services; + } + }; + + ZKFCRpcServer server = new ZKFCRpcServer(myconf, + new InetSocketAddress(0), dummyZKFC, dummyPolicy); + server.start(); + server.stopAndJoin(); + } + @Test public void testFormatOneClusterLeavesOtherClustersAlone() throws Exception { DummyHAService svc = cluster.getService(1); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java index 5f7a264190953..cc76b4ad6d975 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java @@ -41,6 +41,7 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -62,16 +63,15 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest { LoggerFactory.getLogger(TestSSLHttpServer.class); private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites"; private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug"; - private static final String SSL_SERVER_KEYSTORE_PROP_PREFIX = "ssl.server" + - ".keystore"; - private static final 
String SSL_SERVER_TRUSTSTORE_PROP_PREFIX = "ssl.server" + - ".truststore"; + static final String SSL_SERVER_KEYSTORE_PROP_PREFIX = "ssl.server.keystore"; + static final String SSL_SERVER_TRUSTSTORE_PROP_PREFIX = "ssl.server" + + ".truststore"; - private static final String SERVLET_NAME_LONGHEADER = "longheader"; - private static final String SERVLET_PATH_LONGHEADER = + static final String SERVLET_NAME_LONGHEADER = "longheader"; + static final String SERVLET_PATH_LONGHEADER = "/" + SERVLET_NAME_LONGHEADER; - private static final String SERVLET_NAME_ECHO = "echo"; - private static final String SERVLET_PATH_ECHO = "/" + SERVLET_NAME_ECHO; + static final String SERVLET_NAME_ECHO = "echo"; + static final String SERVLET_PATH_ECHO = "/" + SERVLET_NAME_ECHO; private static HttpServer2 server; private static String keystoreDir; @@ -79,7 +79,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest { private static SSLFactory clientSslFactory; private static String cipherSuitesPropertyValue; private static String sslDebugPropertyValue; - private static final String EXCLUDED_CIPHERS = + static final String EXCLUDED_CIPHERS = "TLS_ECDHE_RSA_WITH_RC4_128_SHA," + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n" + "SSL_RSA_WITH_DES_CBC_SHA," @@ -87,9 +87,11 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest { + "SSL_RSA_EXPORT_WITH_RC4_40_MD5,\t \n" + "SSL_RSA_EXPORT_WITH_DES40_CBC_SHA," + "SSL_RSA_WITH_RC4_128_MD5 \t"; - private static final String ONE_ENABLED_CIPHERS = EXCLUDED_CIPHERS + private static final String ONE_ENABLED_CIPHERS_TLS1_2 = EXCLUDED_CIPHERS + ",TLS_RSA_WITH_AES_128_CBC_SHA"; - private static final String EXCLUSIVE_ENABLED_CIPHERS + private static final String ONE_ENABLED_CIPHERS_TLS1_3 = EXCLUDED_CIPHERS + + ",TLS_AES_128_GCM_SHA256"; + private static final String EXCLUSIVE_ENABLED_CIPHERS_TLS1_2 = "\tTLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, \n" + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA," + "TLS_RSA_WITH_AES_128_CBC_SHA," @@ -97,8 +99,12 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest { + "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA," + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA,\t\n " + "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"; + private static final String EXCLUSIVE_ENABLED_CIPHERS_TLS1_3 = + EXCLUSIVE_ENABLED_CIPHERS_TLS1_2 + ",TLS_AES_128_GCM_SHA256"; - private static final String INCLUDED_PROTOCOLS = "SSLv2Hello,TLSv1.1"; + + static final String INCLUDED_PROTOCOLS = "TLSv1.2"; + static final String INCLUDED_PROTOCOLS_JDK11 = "TLSv1.3,TLSv1.2"; @BeforeClass public static void setup() throws Exception { @@ -166,7 +172,7 @@ public static void cleanup() throws Exception { * This ensures that the value https.cipherSuites does * not affect the result of tests. 
*/ - private static void storeHttpsCipherSuites() { + static void storeHttpsCipherSuites() { String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY); if (cipherSuites != null) { LOG.info( @@ -177,7 +183,7 @@ private static void storeHttpsCipherSuites() { System.clearProperty(HTTPS_CIPHER_SUITES_KEY); } - private static void restoreHttpsCipherSuites() { + static void restoreHttpsCipherSuites() { if (cipherSuitesPropertyValue != null) { LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue); @@ -186,7 +192,7 @@ private static void restoreHttpsCipherSuites() { } } - private static void turnOnSSLDebugLogging() { + static void turnOnSSLDebugLogging() { String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY); if (sslDebug != null) { sslDebugPropertyValue = sslDebug; @@ -194,7 +200,7 @@ private static void turnOnSSLDebugLogging() { System.setProperty(JAVAX_NET_DEBUG_KEY, "all"); } - private static void restoreSSLDebugLogging() { + static void restoreSSLDebugLogging() { if (sslDebugPropertyValue != null) { System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyValue); sslDebugPropertyValue = null; @@ -292,22 +298,41 @@ public void testExcludedCiphers() throws Exception { @Test public void testIncludedProtocols() throws Exception { URL url = new URL(baseUrl, SERVLET_PATH_ECHO + "?a=b&c=d"); + + String includedProtocols = INCLUDED_PROTOCOLS; + if (Shell.isJavaVersionAtLeast(11)) { + includedProtocols = INCLUDED_PROTOCOLS_JDK11; + } HttpsURLConnection conn = getConnectionWithPreferredProtocolSSLSocketFactory(url, - INCLUDED_PROTOCOLS); + includedProtocols); assertFalse("included protocol list is empty", - INCLUDED_PROTOCOLS.isEmpty()); + includedProtocols.isEmpty()); readFromConnection(conn); + + PreferredProtocolSSLSocketFactory factory = + (PreferredProtocolSSLSocketFactory)conn.getSSLSocketFactory(); + + if (Shell.isJavaVersionAtLeast(11)) { + assertEquals("TLSv1.3", factory.getSocket().getSession().getProtocol()); + } else { + assertEquals("TLSv1.2", factory.getSocket().getSession().getProtocol()); + } } /** Test that verified that additionally included cipher - * TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA is only available cipher for working + * TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA (TLS 1.2) or + * TLS_AES_128_GCM_SHA256 (TLS 1.3) is only available cipher for working * TLS connection from client to server disabled for all other common ciphers. 
*/ @Test public void testOneEnabledCiphers() throws Exception { - testEnabledCiphers(ONE_ENABLED_CIPHERS); + if (Shell.isJavaVersionAtLeast(11)) { + testEnabledCiphers(ONE_ENABLED_CIPHERS_TLS1_3); + } else { + testEnabledCiphers(ONE_ENABLED_CIPHERS_TLS1_2); + } } /** Test verifies that mutually exclusive server's disabled cipher suites and @@ -315,7 +340,11 @@ public void testOneEnabledCiphers() throws Exception { */ @Test public void testExclusiveEnabledCiphers() throws Exception { - testEnabledCiphers(EXCLUSIVE_ENABLED_CIPHERS); + if (Shell.isJavaVersionAtLeast(11)) { + testEnabledCiphers(EXCLUSIVE_ENABLED_CIPHERS_TLS1_3); + } else { + testEnabledCiphers(EXCLUSIVE_ENABLED_CIPHERS_TLS1_2); + } } private void testEnabledCiphers(String ciphers) throws @@ -406,6 +435,7 @@ private void setEnabledCipherSuites(SSLSocket sslSocket) { private class PreferredProtocolSSLSocketFactory extends SSLSocketFactory { private final SSLSocketFactory delegateSocketFactory; private final String[] enabledProtocols; + private SSLSocket sslSocket; PreferredProtocolSSLSocketFactory(SSLSocketFactory sslSocketFactory, String[] enabledProtocols) { @@ -417,6 +447,10 @@ private class PreferredProtocolSSLSocketFactory extends SSLSocketFactory { } } + public SSLSocket getSocket() { + return sslSocket; + } + @Override public String[] getDefaultCipherSuites() { return delegateSocketFactory.getDefaultCipherSuites(); @@ -430,7 +464,7 @@ public String[] getSupportedCipherSuites() { @Override public Socket createSocket(Socket socket, String string, int i, boolean bln) throws IOException { - SSLSocket sslSocket = (SSLSocket) delegateSocketFactory.createSocket( + sslSocket = (SSLSocket) delegateSocketFactory.createSocket( socket, string, i, bln); setEnabledProtocols(sslSocket); return sslSocket; @@ -438,7 +472,7 @@ public Socket createSocket(Socket socket, String string, int i, boolean bln) @Override public Socket createSocket(String string, int i) throws IOException { - SSLSocket sslSocket = (SSLSocket) delegateSocketFactory.createSocket( + sslSocket = (SSLSocket) delegateSocketFactory.createSocket( string, i); setEnabledProtocols(sslSocket); return sslSocket; @@ -447,7 +481,7 @@ public Socket createSocket(String string, int i) throws IOException { @Override public Socket createSocket(String string, int i, InetAddress ia, int i1) throws IOException { - SSLSocket sslSocket = (SSLSocket) delegateSocketFactory.createSocket( + sslSocket = (SSLSocket) delegateSocketFactory.createSocket( string, i, ia, i1); setEnabledProtocols(sslSocket); return sslSocket; @@ -455,7 +489,7 @@ public Socket createSocket(String string, int i, InetAddress ia, int i1) @Override public Socket createSocket(InetAddress ia, int i) throws IOException { - SSLSocket sslSocket = (SSLSocket) delegateSocketFactory.createSocket(ia, + sslSocket = (SSLSocket) delegateSocketFactory.createSocket(ia, i); setEnabledProtocols(sslSocket); return sslSocket; @@ -464,7 +498,7 @@ public Socket createSocket(InetAddress ia, int i) throws IOException { @Override public Socket createSocket(InetAddress ia, int i, InetAddress ia1, int i1) throws IOException { - SSLSocket sslSocket = (SSLSocket) delegateSocketFactory.createSocket(ia, + sslSocket = (SSLSocket) delegateSocketFactory.createSocket(ia, i, ia1, i1); setEnabledProtocols(sslSocket); return sslSocket; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java new 
file mode 100644 index 0000000000000..e88eba342874c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java @@ -0,0 +1,266 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.http; + +import com.google.common.base.Supplier; +import java.io.File; +import java.io.IOException; +import java.net.URI; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.hadoop.security.ssl.SSLFactory; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import static org.apache.hadoop.http.TestSSLHttpServer.EXCLUDED_CIPHERS; +import static org.apache.hadoop.http.TestSSLHttpServer.INCLUDED_PROTOCOLS; +import static org.apache.hadoop.http.TestSSLHttpServer.SSL_SERVER_KEYSTORE_PROP_PREFIX; +import static org.apache.hadoop.http.TestSSLHttpServer.SSL_SERVER_TRUSTSTORE_PROP_PREFIX; +import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.CLIENT_KEY_STORE_PASSWORD_DEFAULT; +import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.SERVER_KEY_STORE_PASSWORD_DEFAULT; +import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.TRUST_STORE_PASSWORD_DEFAULT; + +/** + * Test suite for testing KeyStore and TrustStore password settings. + */ +public class TestSSLHttpServerConfigs { + + private static final String BASEDIR = + GenericTestUtils.getTempPath(TestSSLHttpServer.class.getSimpleName()); + + private static Configuration conf; + private static Configuration sslConf; + private static String keystoreDir; + private static String sslConfDir; + private static final String SERVER_PWD = SERVER_KEY_STORE_PASSWORD_DEFAULT; + private static final String CLIENT_PWD = CLIENT_KEY_STORE_PASSWORD_DEFAULT; + private static final String TRUST_STORE_PWD = TRUST_STORE_PASSWORD_DEFAULT; + + @Before + public void start() throws Exception { + TestSSLHttpServer.turnOnSSLDebugLogging(); + TestSSLHttpServer.storeHttpsCipherSuites(); + + conf = new Configuration(); + conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10); + + File base = new File(BASEDIR); + FileUtil.fullyDelete(base); + base.mkdirs(); + keystoreDir = new File(BASEDIR).getAbsolutePath(); + sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class); + } + + @After + public void shutdown() throws Exception { + FileUtil.fullyDelete(new File(BASEDIR)); + KeyStoreTestUtil.cleanupSSLConfig(keystoreDir, sslConfDir); + TestSSLHttpServer.restoreHttpsCipherSuites(); + TestSSLHttpServer.restoreSSLDebugLogging(); + } + + /** + * Setup KeyStore and TrustStore with given passwords.
+ */ + private void setupKeyStores(String serverPassword, + String clientPassword, String trustStorePassword) throws Exception { + + KeyStoreTestUtil.setupSSLConfig(keystoreDir, sslConfDir, conf, false, true, + EXCLUDED_CIPHERS, serverPassword, clientPassword, trustStorePassword); + + sslConf = KeyStoreTestUtil.getSslConfig(); + sslConf.set(SSLFactory.SSL_ENABLED_PROTOCOLS_KEY, INCLUDED_PROTOCOLS); + conf.set(SSLFactory.SSL_ENABLED_PROTOCOLS_KEY, INCLUDED_PROTOCOLS); + } + + /** + * Build HttpServer2 using the given passwords to access KeyStore/ TrustStore. + */ + private HttpServer2 setupServer(String keyStoreKeyPassword, + String keyStorePassword, String trustStorePassword) throws Exception { + + HttpServer2 server = new HttpServer2.Builder().setName("test") + .addEndpoint(new URI("https://localhost")).setConf(conf) + .keyPassword(keyStoreKeyPassword) + .keyStore(sslConf.get(SSL_SERVER_KEYSTORE_PROP_PREFIX + ".location"), + keyStorePassword, + sslConf.get(SSL_SERVER_KEYSTORE_PROP_PREFIX + ".type", "jks")) + .trustStore( + sslConf.get(SSL_SERVER_TRUSTSTORE_PROP_PREFIX + ".location"), + trustStorePassword, + sslConf.get(SSL_SERVER_TRUSTSTORE_PROP_PREFIX + ".type", "jks")) + .excludeCiphers(sslConf.get("ssl.server.exclude.cipher.list")).build(); + + return server; + } + + /** + * Test if HttpServer2 start succeeds in validating KeyStore/ TrustStore + * using the given passwords. + */ + private void testServerStart(String keyStoreKeyPassword, + String keyStorePassword, String trustStorePassword) throws Exception { + HttpServer2 server = setupServer(keyStoreKeyPassword, keyStorePassword, + trustStorePassword); + try { + server.start(); + + GenericTestUtils.waitFor(new Supplier<Boolean>() { + @Override + public Boolean get() { + return server.isAlive(); + } + }, 200, 100000); + } finally { + server.stop(); + } + } + + @Test(timeout=120000) + public void testServerSetup() throws Exception { + setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD); + testServerStart(SERVER_PWD, SERVER_PWD, TRUST_STORE_PWD); + } + + @Test(timeout=120000) + public void testServerSetupWithoutTrustPassword() throws Exception { + setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD); + testServerStart(SERVER_PWD, SERVER_PWD, null); + } + + @Test(timeout=120000) + public void testServerSetupWithoutKeyStorePassword() throws Exception { + setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD); + testServerStart(SERVER_PWD, null, null); + } + + @Test(timeout=120000) + public void testServerSetupWithoutKeyStoreKeyPassword() throws Exception { + setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD); + testServerStart(null, SERVER_PWD, null); + } + + @Test(timeout=120000) + public void testServerSetupWithNoKeyStorePassword() throws Exception { + setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD); + // Accessing KeyStore without either of KeyStore.KeyPassword or KeyStore + // .password should fail. + try { + testServerStart(null, null, null); + Assert.fail("Server should have failed to start without any " + + "KeyStore password."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Problem starting http server", + e); + } + } + + @Test(timeout=120000) + public void testServerSetupWithWrongKeyStorePassword() throws Exception { + setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD); + + // Accessing KeyStore with wrong keyStore password/ keyPassword should fail.
+ try { + testServerStart(SERVER_PWD, "wrongPassword", null); + Assert.fail("Server should have failed to start with wrong " + + "KeyStore password."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Keystore was tampered with, " + + "or password was incorrect", e); + } + + try { + testServerStart("wrongPassword", SERVER_PWD, null); + Assert.fail("Server should have failed to start with wrong " + + "KeyStore password."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Problem starting http server", + e); + GenericTestUtils.assertExceptionContains("Cannot recover key", + e.getCause()); + } + } + + @Test(timeout=120000) + public void testKeyStoreSetupWithoutTrustStorePassword() throws Exception { + // Setup TrustStore without TrustStore password + setupKeyStores(SERVER_PWD, CLIENT_PWD, ""); + + // Accessing TrustStore without password (null password) should succeed + testServerStart(SERVER_PWD, SERVER_PWD, null); + + // Accessing TrustStore with wrong password (even if password is not + // set) should fail. + try { + testServerStart(SERVER_PWD, SERVER_PWD, "wrongPassword"); + Assert.fail("Server should have failed to start with wrong " + + "TrustStore password."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Keystore was tampered with, " + + "or password was incorrect", e); + } + } + + @Test(timeout=120000) + public void testKeyStoreSetupWithoutKeyStorePassword() throws Exception { + // Setup KeyStore without KeyStore password + setupKeyStores(SERVER_PWD, "", TRUST_STORE_PWD); + + // Accessing KeyStore without password (null password) should succeed + testServerStart(SERVER_PWD, null, TRUST_STORE_PWD); + + // Accessing KeyStore with wrong password (even if password is not + // set) should fail. + try { + testServerStart(SERVER_PWD, "wrongPassword", TRUST_STORE_PWD); + Assert.fail("Server should have failed to start with wrong " + + "KeyStore password."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Keystore was tampered with, " + + "or password was incorrect", e); + } + } + + @Test(timeout=120000) + public void testKeyStoreSetupWithoutPassword() throws Exception { + // Setup KeyStore without any password + setupKeyStores("", "", ""); + + // Accessing KeyStore with either one of KeyStore.Password or KeyStore + // .KeyPassword as empty string should pass. If the password is null, it + // is not set in SSLContextFactory while setting up the server. 
+ testServerStart("", null, null); + testServerStart(null, "", null); + + try { + testServerStart(null, null, null); + Assert.fail("Server should have failed to start without " + + "KeyStore password."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Problem starting http server", + e); + GenericTestUtils.assertExceptionContains("Password must not be null", + e.getCause()); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java index ac8ad2e725920..20d4f08612964 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java @@ -18,12 +18,12 @@ package org.apache.hadoop.io; -import java.io.*; - +import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertArrayEquals; + +import java.io.IOException; + import org.junit.Test; @@ -84,23 +84,14 @@ public void testArrayWritableToArray() { /** * test {@link ArrayWritable} constructor with null */ - @Test + @Test(expected = IllegalArgumentException.class) public void testNullArgument() { - try { - Class valueClass = null; - new ArrayWritable(valueClass); - fail("testNullArgument error !!!"); - } catch (IllegalArgumentException exp) { - //should be for test pass - } catch (Exception e) { - fail("testNullArgument error !!!"); - } + new ArrayWritable((Class) null); } /** * test {@link ArrayWritable} constructor with {@code String[]} as a parameter */ - @SuppressWarnings("deprecation") @Test public void testArrayWritableStringConstructor() { String[] original = { "test1", "test2", "test3" }; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java index 740540d5d23c8..a80f6e07b3878 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java @@ -27,6 +27,8 @@ import java.util.Collections; import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; @@ -49,6 +51,8 @@ import org.junit.Test; public class TestBloomMapFile { + private static final Logger LOG = + LoggerFactory.getLogger(TestBloomMapFile.class); private static Configuration conf = new Configuration(); private static final Path TEST_ROOT = new Path(GenericTestUtils.getTempPath( TestMapFile.class.getSimpleName())); @@ -107,7 +111,7 @@ public void testMembershipTest() throws Exception { System.out.println("False positives: " + falsePos); assertTrue(falsePos < 2); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } @@ -136,7 +140,7 @@ private void checkMembershipVaryingSizedKeys(List keys) reader.close(); fs.delete(qualifiedDirName, true); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } @@ -173,7 +177,7 @@ public void testDeleteFile() { } catch (Exception ex) { fail("unexpect ex in 
testDeleteFile !!!"); } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } @@ -202,7 +206,7 @@ public void testIOExceptionInWriterConstructor() { } catch (Exception ex) { fail("unexpect ex in testIOExceptionInWriterConstructor !!!"); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } @@ -237,7 +241,7 @@ public void testGetBloomMapFile() { } catch (Exception ex) { fail("unexpect ex in testGetBloomMapFile !!!"); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } @@ -286,7 +290,7 @@ public void testBloomMapFileConstructors() { } catch (Exception ex) { fail("testBloomMapFileConstructors error !!!"); } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java index 7ec422732ecd8..d8a22f358adaa 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java @@ -26,6 +26,8 @@ import java.util.Iterator; import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; @@ -48,7 +50,7 @@ import static org.mockito.Mockito.*; public class TestMapFile { - + private static final Logger LOG = LoggerFactory.getLogger(TestMapFile.class); private static final Path TEST_DIR = new Path(GenericTestUtils.getTempPath( TestMapFile.class.getSimpleName())); @@ -187,7 +189,7 @@ public void testGetClosestOnCurrentApi() throws Exception { closest = (Text) reader.getClosest(key, value, true); assertEquals(new Text("91"), closest); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } @@ -211,7 +213,7 @@ public void testMidKeyOnCurrentApi() throws Exception { reader = createReader(TEST_PREFIX, IntWritable.class); assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey()); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } @@ -233,7 +235,7 @@ public void testRename() { } catch (IOException ex) { fail("testRename error " + ex); } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } @@ -265,7 +267,7 @@ public void testRenameWithException() { assertEquals("testRenameWithException invalid IOExceptionMessage !!!", ex.getMessage(), ERROR_MESSAGE); } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } @@ -292,7 +294,7 @@ public void testRenameWithFalse() { assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!", ex .getMessage().startsWith(ERROR_MESSAGE)); } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } @@ -319,7 +321,7 @@ public void testWriteWithFailDirCreation() { assertTrue("testWriteWithFailDirCreation ex error !!!", ex.getMessage() .startsWith(ERROR_MESSAGE)); } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } @@ -347,7 +349,7 @@ public void testOnFinalKey() { } catch (IOException ex) { fail("testOnFinalKey error !!!"); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, 
writer, reader); } } @@ -392,7 +394,7 @@ public void testReaderGetClosest() throws Exception { } catch (IOException ex) { /* Should be thrown to pass the test */ } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } @@ -410,7 +412,7 @@ public void testReaderWithWrongValueClass() { } catch (IOException ex) { /* Should be thrown to pass the test */ } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } @@ -451,7 +453,7 @@ public void testReaderKeyIteration() { } catch (IOException ex) { fail("reader seek error !!!"); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } @@ -482,7 +484,7 @@ public void testFix() { } catch (Exception ex) { fail("testFix error !!!"); } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } @@ -588,7 +590,7 @@ public void testDeprecatedConstructors() { } catch (IOException e) { fail(e.getMessage()); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } @@ -607,7 +609,7 @@ public void testKeyLessWriterCreation() { } catch (Exception e) { fail("fail in testKeyLessWriterCreation. Other ex !!!"); } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } /** @@ -636,7 +638,7 @@ public void testPathExplosionWriterCreation() { } catch (Exception e) { fail("fail in testPathExplosionWriterCreation. Other ex !!!"); } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } @@ -657,7 +659,7 @@ public void testDescOrderWithThrowExceptionWriterAppend() { } catch (Exception e) { fail("testDescOrderWithThrowExceptionWriterAppend other ex throw !!!"); } finally { - IOUtils.cleanup(null, writer); + IOUtils.cleanupWithLogger(LOG, writer); } } @@ -745,7 +747,7 @@ public void testGetClosest() throws Exception { closest = (Text) reader.getClosest(key, value, true); assertEquals(new Text("90"), closest); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } @@ -768,7 +770,7 @@ public void testMidKey() throws Exception { reader = new MapFile.Reader(qualifiedDirName, conf); assertEquals(new IntWritable(1), reader.midKey()); } finally { - IOUtils.cleanup(null, writer, reader); + IOUtils.cleanupWithLogger(LOG, writer, reader); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestObjectWritableProtos.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestObjectWritableProtos.java index 93704fb5fc676..f3012ded25bb5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestObjectWritableProtos.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestObjectWritableProtos.java @@ -24,8 +24,8 @@ import org.apache.hadoop.conf.Configuration; import org.junit.Test; -import com.google.protobuf.DescriptorProtos; -import com.google.protobuf.Message; +import org.apache.hadoop.thirdparty.protobuf.DescriptorProtos; +import org.apache.hadoop.thirdparty.protobuf.Message; /** * Test case for the use of Protocol Buffers within ObjectWritable. 
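The import change above is the recurring pattern in this patch: protobuf classes move from com.google.protobuf to the relocated org.apache.hadoop.thirdparty.protobuf package (the same swap appears in DummyHAService above and RPCCallBenchmark below), keeping these tests on the shaded protobuf runtime that the main code now uses. A minimal sketch of the pattern follows; the class and field names are hypothetical and not part of the patch:
// Hypothetical sketch only; class name and fields are illustrative.
// Removed: import com.google.protobuf.BlockingService;
// Removed: import com.google.protobuf.Message;
import org.apache.hadoop.thirdparty.protobuf.BlockingService;
import org.apache.hadoop.thirdparty.protobuf.Message;

public class ShadedProtobufImportSketch {
  // Only the package prefix changes; the protobuf API itself is unchanged,
  // so existing BlockingService/Message code keeps compiling after the swap.
  private BlockingService rpcService;
  private Message lastMessage;
}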
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java index 044824356ed30..cf64bbc0f9457 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java @@ -649,8 +649,9 @@ public void testCreateWriterOnExistingFile() throws IOException { @Test public void testRecursiveSeqFileCreate() throws IOException { FileSystem fs = FileSystem.getLocal(conf); - Path name = new Path(new Path(GenericTestUtils.getTempPath( - "recursiveCreateDir")), "file"); + Path parentDir = new Path(GenericTestUtils.getTempPath( + "recursiveCreateDir")); + Path name = new Path(parentDir, "file"); boolean createParent = false; try { @@ -662,11 +663,16 @@ public void testRecursiveSeqFileCreate() throws IOException { // Expected } - createParent = true; - SequenceFile.createWriter(fs, conf, name, RandomDatum.class, - RandomDatum.class, 512, (short) 1, 4096, createParent, - CompressionType.NONE, null, new Metadata()); - // should succeed, fails if exception thrown + try { + createParent = true; + SequenceFile.createWriter(fs, conf, name, RandomDatum.class, + RandomDatum.class, 512, (short) 1, 4096, createParent, + CompressionType.NONE, null, new Metadata()); + // should succeed, fails if exception thrown + } finally { + fs.deleteOnExit(parentDir); + fs.close(); + } } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java index 59856a4de11f9..54df39955d6cf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java @@ -291,9 +291,9 @@ public void testTextText() throws CharacterCodingException { a.append("xdefgxxx".getBytes(), 1, 4); assertEquals("modified aliased string", "abc", b.toString()); assertEquals("appended string incorrectly", "abcdefg", a.toString()); - // add an extra byte so that capacity = 14 and length = 8 + // add an extra byte so that capacity = 10 and length = 8 a.append(new byte[]{'d'}, 0, 1); - assertEquals(14, a.getBytes().length); + assertEquals(10, a.getBytes().length); assertEquals(8, a.copyBytes().length); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java index 35f84b950e427..8be2dce06d1fe 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java @@ -126,7 +126,7 @@ private void addPair(T compressor, E decompressor, String name) { builder.add(new TesterPair(name, compressor, decompressor)); } - public void test() throws InstantiationException, IllegalAccessException { + public void test() throws Exception { pairs = builder.build(); pairs = assertionDelegate.filterOnAssumeWhat(pairs); @@ -287,47 +287,45 @@ private boolean checkSetInputArrayIndexOutOfBoundsException( @Override public void assertCompression(String name, Compressor compressor, - Decompressor decompressor, byte[] rawData) { + 
Decompressor decompressor, byte[] rawData) throws Exception { int cSize = 0; int decompressedSize = 0; - byte[] compressedResult = new byte[rawData.length]; + // Snappy compression can increase data size + int maxCompressedLength = 32 + rawData.length + rawData.length/6; + byte[] compressedResult = new byte[maxCompressedLength]; byte[] decompressedBytes = new byte[rawData.length]; - try { - assertTrue( - joiner.join(name, "compressor.needsInput before error !!!"), - compressor.needsInput()); - assertTrue( + assertTrue( + joiner.join(name, "compressor.needsInput before error !!!"), + compressor.needsInput()); + assertEquals( joiner.join(name, "compressor.getBytesWritten before error !!!"), - compressor.getBytesWritten() == 0); - compressor.setInput(rawData, 0, rawData.length); - compressor.finish(); - while (!compressor.finished()) { - cSize += compressor.compress(compressedResult, 0, - compressedResult.length); - } - compressor.reset(); - - assertTrue( - joiner.join(name, "decompressor.needsInput() before error !!!"), - decompressor.needsInput()); - decompressor.setInput(compressedResult, 0, cSize); - assertFalse( - joiner.join(name, "decompressor.needsInput() after error !!!"), - decompressor.needsInput()); - while (!decompressor.finished()) { - decompressedSize = decompressor.decompress(decompressedBytes, 0, - decompressedBytes.length); - } - decompressor.reset(); - assertTrue(joiner.join(name, " byte size not equals error !!!"), - decompressedSize == rawData.length); - assertArrayEquals( - joiner.join(name, " byte arrays not equals error !!!"), rawData, - decompressedBytes); - } catch (Exception ex) { - fail(joiner.join(name, ex.getMessage())); + 0, compressor.getBytesWritten()); + compressor.setInput(rawData, 0, rawData.length); + compressor.finish(); + while (!compressor.finished()) { + cSize += compressor.compress(compressedResult, 0, + compressedResult.length); + } + compressor.reset(); + + assertTrue( + joiner.join(name, "decompressor.needsInput() before error !!!"), + decompressor.needsInput()); + decompressor.setInput(compressedResult, 0, cSize); + assertFalse( + joiner.join(name, "decompressor.needsInput() after error !!!"), + decompressor.needsInput()); + while (!decompressor.finished()) { + decompressedSize = decompressor.decompress(decompressedBytes, 0, + decompressedBytes.length); } + decompressor.reset(); + assertEquals(joiner.join(name, " byte size not equals error !!!"), + rawData.length, decompressedSize); + assertArrayEquals( + joiner.join(name, " byte arrays not equals error !!!"), rawData, + decompressedBytes); } }), @@ -519,6 +517,6 @@ abstract static class TesterCompressionStrategy { protected final Logger logger = Logger.getLogger(getClass()); abstract void assertCompression(String name, Compressor compressor, - Decompressor decompressor, byte[] originalRawData); + Decompressor decompressor, byte[] originalRawData) throws Exception; } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java index cc986c7e0aea4..c8900bad1df56 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.io.compress.snappy; 
+import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -44,11 +45,16 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.junit.Assume.*; public class TestSnappyCompressorDecompressor { + public static final Logger LOG = + LoggerFactory.getLogger(TestSnappyCompressorDecompressor.class); + @Before public void before() { assumeTrue(SnappyCodec.isNativeCodeLoaded()); @@ -167,40 +173,41 @@ public void testSnappyDecompressorCompressAIOBException() { } @Test - public void testSnappyCompressDecompress() { + public void testSnappyCompressDecompress() throws Exception { int BYTE_SIZE = 1024 * 54; byte[] bytes = BytesGenerator.get(BYTE_SIZE); SnappyCompressor compressor = new SnappyCompressor(); - try { - compressor.setInput(bytes, 0, bytes.length); - assertTrue("SnappyCompressDecompress getBytesRead error !!!", - compressor.getBytesRead() > 0); - assertTrue( - "SnappyCompressDecompress getBytesWritten before compress error !!!", - compressor.getBytesWritten() == 0); - - byte[] compressed = new byte[BYTE_SIZE]; - int cSize = compressor.compress(compressed, 0, compressed.length); - assertTrue( - "SnappyCompressDecompress getBytesWritten after compress error !!!", - compressor.getBytesWritten() > 0); - - SnappyDecompressor decompressor = new SnappyDecompressor(BYTE_SIZE); - // set as input for decompressor only compressed data indicated with cSize - decompressor.setInput(compressed, 0, cSize); - byte[] decompressed = new byte[BYTE_SIZE]; - decompressor.decompress(decompressed, 0, decompressed.length); - - assertTrue("testSnappyCompressDecompress finished error !!!", - decompressor.finished()); - Assert.assertArrayEquals(bytes, decompressed); - compressor.reset(); - decompressor.reset(); - assertTrue("decompressor getRemaining error !!!", - decompressor.getRemaining() == 0); - } catch (Exception e) { - fail("testSnappyCompressDecompress ex error!!!"); - } + compressor.setInput(bytes, 0, bytes.length); + assertTrue("SnappyCompressDecompress getBytesRead error !!!", + compressor.getBytesRead() > 0); + assertEquals( + "SnappyCompressDecompress getBytesWritten before compress error !!!", + 0, compressor.getBytesWritten()); + + // snappy compression may increase data size. 
+ // This calculation comes from "Snappy::MaxCompressedLength(size_t)" + int maxSize = 32 + BYTE_SIZE + BYTE_SIZE / 6; + byte[] compressed = new byte[maxSize]; + int cSize = compressor.compress(compressed, 0, compressed.length); + LOG.info("input size: {}", BYTE_SIZE); + LOG.info("compressed size: {}", cSize); + assertTrue( + "SnappyCompressDecompress getBytesWritten after compress error !!!", + compressor.getBytesWritten() > 0); + + SnappyDecompressor decompressor = new SnappyDecompressor(); + // set as input for decompressor only compressed data indicated with cSize + decompressor.setInput(compressed, 0, cSize); + byte[] decompressed = new byte[BYTE_SIZE]; + decompressor.decompress(decompressed, 0, decompressed.length); + + assertTrue("testSnappyCompressDecompress finished error !!!", + decompressor.finished()); + Assert.assertArrayEquals(bytes, decompressed); + compressor.reset(); + decompressor.reset(); + assertEquals("decompressor getRemaining error !!!", + 0, decompressor.getRemaining()); } @Test @@ -278,7 +285,38 @@ public void testSnappyBlockCompression() { fail("testSnappyBlockCompression ex error !!!"); } } - + + @Test + // The buffer size is smaller than the input. + public void testSnappyCompressDecompressWithSmallBuffer() throws Exception { + int inputSize = 1024 * 50; + int bufferSize = 512; + ByteArrayOutputStream out = new ByteArrayOutputStream(); + byte[] buffer = new byte[bufferSize]; + byte[] input = BytesGenerator.get(inputSize); + + SnappyCompressor compressor = new SnappyCompressor(); + compressor.setInput(input, 0, inputSize); + compressor.finish(); + while (!compressor.finished()) { + int len = compressor.compress(buffer, 0, buffer.length); + out.write(buffer, 0, len); + } + byte[] compressed = out.toByteArray(); + assertThat(compressed).hasSizeGreaterThan(0); + out.reset(); + + SnappyDecompressor decompressor = new SnappyDecompressor(); + decompressor.setInput(compressed, 0, compressed.length); + while (!decompressor.finished()) { + int len = decompressor.decompress(buffer, 0, buffer.length); + out.write(buffer, 0, len); + } + byte[] decompressed = out.toByteArray(); + + assertThat(decompressed).isEqualTo(input); + } + private void compressDecompressLoop(int rawDataSize) throws IOException { byte[] rawData = BytesGenerator.get(rawDataSize); byte[] compressedResult = new byte[rawDataSize+20]; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFile.java index 80aeef2d63672..ea20fbeda3d05 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFile.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFile.java @@ -109,7 +109,7 @@ private int readAndCheckbytes(Scanner scanner, int start, int n) byte[] val = readValue(scanner); String keyStr = String.format(localFormatter, i); String valStr = value + keyStr; - assertTrue("btyes for keys do not match " + keyStr + " " + assertTrue("bytes for keys do not match " + keyStr + " " + new String(key), Arrays.equals(keyStr.getBytes(), key)); assertTrue("bytes for vals do not match " + valStr + " " + new String(val), Arrays.equals( @@ -117,7 +117,7 @@ private int readAndCheckbytes(Scanner scanner, int start, int n) assertTrue(scanner.advance()); key = readKey(scanner); val = readValue(scanner); - assertTrue("btyes for keys do not match", Arrays.equals( + assertTrue("bytes for keys do not match", 
Arrays.equals( keyStr.getBytes(), key)); assertTrue("bytes for vals do not match", Arrays.equals( valStr.getBytes(), val)); @@ -146,11 +146,11 @@ private int readLargeRecords(Scanner scanner, int start, int n) for (int i = start; i < (start + n); i++) { byte[] key = readKey(scanner); String keyStr = String.format(localFormatter, i); - assertTrue("btyes for keys do not match", Arrays.equals( + assertTrue("bytes for keys do not match", Arrays.equals( keyStr.getBytes(), key)); scanner.advance(); key = readKey(scanner); - assertTrue("btyes for keys do not match", Arrays.equals( + assertTrue("bytes for keys do not match", Arrays.equals( keyStr.getBytes(), key)); scanner.advance(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index a14928c7b4e24..c21fa443ddcc4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -800,7 +800,7 @@ public void testPmemCheckParameters() { // Incorrect file length try { - NativeIO.POSIX.Pmem.mapBlock(filePath, length); + NativeIO.POSIX.Pmem.mapBlock(filePath, length, false); fail("Illegal length parameter should be detected"); } catch (Exception e) { LOG.info(e.getMessage()); @@ -810,7 +810,7 @@ public void testPmemCheckParameters() { filePath = "/mnt/pmem0/test_native_io"; length = -1L; try { - NativeIO.POSIX.Pmem.mapBlock(filePath, length); + NativeIO.POSIX.Pmem.mapBlock(filePath, length, false); fail("Illegal length parameter should be detected"); }catch (Exception e) { LOG.info(e.getMessage()); @@ -837,10 +837,10 @@ public void testPmemMapMultipleFiles() { for (int i = 0; i < fileNumber; i++) { String path = filePath + i; LOG.info("File path = " + path); - NativeIO.POSIX.Pmem.mapBlock(path, length); + NativeIO.POSIX.Pmem.mapBlock(path, length, false); } try { - NativeIO.POSIX.Pmem.mapBlock(filePath, length); + NativeIO.POSIX.Pmem.mapBlock(filePath, length, false); fail("Request map extra file when persistent memory is all occupied"); } catch (Exception e) { LOG.info(e.getMessage()); @@ -863,7 +863,7 @@ public void testPmemMapBigFile() { length = volumeSize + 1024L; try { LOG.info("File length = " + length); - NativeIO.POSIX.Pmem.mapBlock(filePath, length); + NativeIO.POSIX.Pmem.mapBlock(filePath, length, false); fail("File length exceeds persistent memory total volume size"); }catch (Exception e) { LOG.info(e.getMessage()); @@ -881,7 +881,8 @@ public void testPmemCopy() throws IOException { // memory device. 
String filePath = "/mnt/pmem0/copy"; long length = 4096; - PmemMappedRegion region = NativeIO.POSIX.Pmem.mapBlock(filePath, length); + PmemMappedRegion region = NativeIO.POSIX.Pmem.mapBlock( + filePath, length, false); assertTrue(NativeIO.POSIX.Pmem.isPmem(region.getAddress(), length)); assertFalse(NativeIO.POSIX.Pmem.isPmem(region.getAddress(), length + 100)); assertFalse(NativeIO.POSIX.Pmem.isPmem(region.getAddress() + 100, length)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java index 3b42bb46e828c..e1fc29f88126b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java @@ -377,4 +377,23 @@ public void testNoRetryOnAccessControlException() throws Exception { assertEquals(RetryDecision.FAIL, caughtRetryAction.action); } } + + @Test + public void testWrappedAccessControlException() throws Exception { + RetryPolicy policy = mock(RetryPolicy.class); + RetryPolicy realPolicy = RetryPolicies.failoverOnNetworkException(5); + setupMockPolicy(policy, realPolicy); + + UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create( + UnreliableInterface.class, unreliableImpl, policy); + + try { + unreliable.failsWithWrappedAccessControlException(); + fail("Should fail"); + } catch (IOException expected) { + verify(policy, times(1)).shouldRetry(any(Exception.class), anyInt(), + anyInt(), anyBoolean()); + assertEquals(RetryDecision.FAIL, caughtRetryAction.action); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java index a20d898988400..15a84bbad4a66 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java @@ -139,6 +139,13 @@ public void failsWithAccessControlExceptionEightTimes() } } + public void failsWithWrappedAccessControlException() + throws IOException { + AccessControlException ace = new AccessControlException(); + IOException ioe = new IOException(ace); + throw new IOException(ioe); + } + @Override public String succeedsOnceThenFailsReturningString() throws UnreliableException, IOException, StandbyException { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java index 738a76086bae2..80bf47dc23bea 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java @@ -83,6 +83,10 @@ public static class FatalException extends UnreliableException { void failsWithAccessControlExceptionEightTimes() throws AccessControlException; + @Idempotent + void failsWithWrappedAccessControlException() + throws IOException; + public String succeedsOnceThenFailsReturningString() throws UnreliableException, StandbyException, IOException; @Idempotent diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java index 9356dabe2f701..bbb4ec21812e3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ipc; import com.google.common.base.Joiner; -import com.google.protobuf.BlockingService; +import org.apache.hadoop.thirdparty.protobuf.BlockingService; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java index bb4717ed36d35..38b3fe5681024 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java @@ -176,6 +176,12 @@ public void assertCanPut(CallQueueManager<FakeCall> cq, int numberOfPuts, private static final Class<? extends RpcScheduler> schedulerClass = CallQueueManager.convertSchedulerClass(DefaultRpcScheduler.class); + private static final Class<? extends BlockingQueue<FakeCall>> fcqueueClass + = CallQueueManager.convertQueueClass(FairCallQueue.class, FakeCall.class); + + private static final Class<? extends RpcScheduler> rpcSchedulerClass + = CallQueueManager.convertSchedulerClass(DecayRpcScheduler.class); + @Test public void testCallQueueCapacity() throws InterruptedException { manager = new CallQueueManager<FakeCall>(queueClass, schedulerClass, false, @@ -319,6 +325,55 @@ public void testSwapUnderContention() throws InterruptedException { assertEquals(totalCallsConsumed, totalCallsCreated); } + @Test + public void testQueueCapacity() throws InterruptedException { + int capacity = 4; + String ns = "ipc.8020"; + conf.setInt("ipc.8020.scheduler.priority.levels", 2); + conf.set("ipc.8020.callqueue.capacity.weights", "1,3"); + manager = new CallQueueManager<>(fcqueueClass, rpcSchedulerClass, false, + capacity, ns, conf); + + // insert 4 calls with 2 at each priority + // since the queue with priority 0 has only 1 capacity, the second call + // with p0 will be overflowed to queue with priority 1 + for (int i = 0; i < capacity; i++) { + FakeCall fc = new FakeCall(i); + fc.setPriorityLevel(i%2); + manager.put(fc); + } + + // get calls, the order should be + // call 0 with p0 + // call 1 with p1 + // call 2 with p0 since overflow + // call 3 with p1 + assertEquals(manager.take().priorityLevel, 0); + assertEquals(manager.take().priorityLevel, 1); + assertEquals(manager.take().priorityLevel, 0); + assertEquals(manager.take().priorityLevel, 1); + + conf.set("ipc.8020.callqueue.capacity.weights", "1,1"); + manager = new CallQueueManager<>(fcqueueClass, rpcSchedulerClass, false, + capacity, ns, conf); + + for (int i = 0; i < capacity; i++) { + FakeCall fc = new FakeCall(i); + fc.setPriorityLevel(i%2); + manager.put(fc); + } + + // get calls, the order should be + // call 0 with p0 + // call 2 with p0 + // call 1 with p1 + // call 3 with p1 + assertEquals(manager.take().priorityLevel, 0); + assertEquals(manager.take().priorityLevel, 0); + assertEquals(manager.take().priorityLevel, 1); + assertEquals(manager.take().priorityLevel, 1); + } + public static class ExceptionFakeCall implements 
Schedulable { public ExceptionFakeCall() { throw new IllegalArgumentException("Exception caused by call queue " + diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java index 7bdc6b5e96d0c..71723325e2c86 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java @@ -66,15 +66,15 @@ public void testZeroScheduler() { @SuppressWarnings("deprecation") public void testParsePeriod() { // By default - scheduler = new DecayRpcScheduler(1, "", new Configuration()); + scheduler = new DecayRpcScheduler(1, "ipc.1", new Configuration()); assertEquals(DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_DEFAULT, scheduler.getDecayPeriodMillis()); // Custom Configuration conf = new Configuration(); - conf.setLong("ns." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, + conf.setLong("ipc.2." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, 1058); - scheduler = new DecayRpcScheduler(1, "ns", conf); + scheduler = new DecayRpcScheduler(1, "ipc.2", conf); assertEquals(1058L, scheduler.getDecayPeriodMillis()); } @@ -82,15 +82,15 @@ public void testParsePeriod() { @SuppressWarnings("deprecation") public void testParseFactor() { // Default - scheduler = new DecayRpcScheduler(1, "", new Configuration()); + scheduler = new DecayRpcScheduler(1, "ipc.3", new Configuration()); assertEquals(DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_FACTOR_DEFAULT, scheduler.getDecayFactor(), 0.00001); // Custom Configuration conf = new Configuration(); - conf.set("prefix." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_FACTOR_KEY, + conf.set("ipc.4." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_FACTOR_KEY, "0.125"); - scheduler = new DecayRpcScheduler(1, "prefix", conf); + scheduler = new DecayRpcScheduler(1, "ipc.4", conf); assertEquals(0.125, scheduler.getDecayFactor(), 0.00001); } @@ -106,23 +106,23 @@ public void assertEqualDecimalArrays(double[] a, double[] b) { public void testParseThresholds() { // Defaults vary by number of queues Configuration conf = new Configuration(); - scheduler = new DecayRpcScheduler(1, "", conf); + scheduler = new DecayRpcScheduler(1, "ipc.5", conf); assertEqualDecimalArrays(new double[]{}, scheduler.getThresholds()); - scheduler = new DecayRpcScheduler(2, "", conf); + scheduler = new DecayRpcScheduler(2, "ipc.6", conf); assertEqualDecimalArrays(new double[]{0.5}, scheduler.getThresholds()); - scheduler = new DecayRpcScheduler(3, "", conf); + scheduler = new DecayRpcScheduler(3, "ipc.7", conf); assertEqualDecimalArrays(new double[]{0.25, 0.5}, scheduler.getThresholds()); - scheduler = new DecayRpcScheduler(4, "", conf); + scheduler = new DecayRpcScheduler(4, "ipc.8", conf); assertEqualDecimalArrays(new double[]{0.125, 0.25, 0.5}, scheduler.getThresholds()); // Custom conf = new Configuration(); - conf.set("ns." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_THRESHOLDS_KEY, + conf.set("ipc.9." 
+ DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_THRESHOLDS_KEY, "1, 10, 20, 50, 85"); - scheduler = new DecayRpcScheduler(6, "ns", conf); + scheduler = new DecayRpcScheduler(6, "ipc.9", conf); assertEqualDecimalArrays(new double[]{0.01, 0.1, 0.2, 0.5, 0.85}, scheduler.getThresholds()); } @@ -130,8 +130,9 @@ public void testParseThresholds() { @SuppressWarnings("deprecation") public void testAccumulate() { Configuration conf = new Configuration(); - conf.set("ns." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, "99999999"); // Never flush - scheduler = new DecayRpcScheduler(1, "ns", conf); + conf.set("ipc.10." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, + "99999999"); // Never flush + scheduler = new DecayRpcScheduler(1, "ipc.10", conf); assertEquals(0, scheduler.getCallCostSnapshot().size()); // empty first @@ -151,11 +152,11 @@ public void testAccumulate() { @SuppressWarnings("deprecation") public void testDecay() throws Exception { Configuration conf = new Configuration(); - conf.setLong("ns." // Never decay + conf.setLong("ipc.11." // Never decay + DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_KEY, 999999999); - conf.setDouble("ns." + conf.setDouble("ipc.11." + DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_FACTOR_KEY, 0.5); - scheduler = new DecayRpcScheduler(1, "ns", conf); + scheduler = new DecayRpcScheduler(1, "ipc.11", conf); assertEquals(0, scheduler.getTotalCallSnapshot()); @@ -202,7 +203,7 @@ public void testDecay() throws Exception { @SuppressWarnings("deprecation") public void testPriority() throws Exception { Configuration conf = new Configuration(); - final String namespace = "ns"; + final String namespace = "ipc.12"; conf.set(namespace + "." + DecayRpcScheduler .IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, "99999999"); // Never flush conf.set(namespace + "." + DecayRpcScheduler @@ -239,9 +240,11 @@ public void testPriority() throws Exception { @SuppressWarnings("deprecation") public void testPeriodic() throws InterruptedException { Configuration conf = new Configuration(); - conf.set("ns." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, "10"); - conf.set("ns." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_FACTOR_KEY, "0.5"); - scheduler = new DecayRpcScheduler(1, "ns", conf); + conf.set( + "ipc.13." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, "10"); + conf.set( + "ipc.13." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_FACTOR_KEY, "0.5"); + scheduler = new DecayRpcScheduler(1, "ipc.13", conf); assertEquals(10, scheduler.getDecayPeriodMillis()); assertEquals(0, scheduler.getTotalCallSnapshot()); @@ -269,7 +272,7 @@ public void testNPEatInitialization() throws InterruptedException { // MetricsSystemImpl to true DefaultMetricsSystem.initialize("NameNode"); Configuration conf = new Configuration(); - scheduler = new DecayRpcScheduler(1, "ns", conf); + scheduler = new DecayRpcScheduler(1, "ipc.14", conf); // check if there is npe in log assertFalse(bytes.toString().contains("NullPointerException")); } finally { @@ -280,7 +283,7 @@ public void testNPEatInitialization() throws InterruptedException { @Test public void testUsingWeightedTimeCostProvider() { - scheduler = getSchedulerWithWeightedTimeCostProvider(3); + scheduler = getSchedulerWithWeightedTimeCostProvider(3, "ipc.15"); // 3 details in increasing order of cost. 
Although medium has a longer // duration, the shared lock is weighted less than the exclusive lock @@ -330,7 +333,7 @@ public void testUsingWeightedTimeCostProvider() { @Test public void testUsingWeightedTimeCostProviderWithZeroCostCalls() { - scheduler = getSchedulerWithWeightedTimeCostProvider(2); + scheduler = getSchedulerWithWeightedTimeCostProvider(2, "ipc.16"); ProcessingDetails emptyDetails = new ProcessingDetails(TimeUnit.MILLISECONDS); @@ -347,7 +350,7 @@ public void testUsingWeightedTimeCostProviderWithZeroCostCalls() { @Test public void testUsingWeightedTimeCostProviderNoRequests() { - scheduler = getSchedulerWithWeightedTimeCostProvider(2); + scheduler = getSchedulerWithWeightedTimeCostProvider(2, "ipc.18"); assertEquals(0, scheduler.getPriorityLevel(mockCall("A"))); } @@ -357,13 +360,13 @@ public void testUsingWeightedTimeCostProviderNoRequests() { * normal decaying disabled. */ private static DecayRpcScheduler getSchedulerWithWeightedTimeCostProvider( - int priorityLevels) { + int priorityLevels, String ns) { Configuration conf = new Configuration(); - conf.setClass("ns." + CommonConfigurationKeys.IPC_COST_PROVIDER_KEY, + conf.setClass(ns + "." + CommonConfigurationKeys.IPC_COST_PROVIDER_KEY, WeightedTimeCostProvider.class, CostProvider.class); - conf.setLong("ns." + conf.setLong(ns + "." + DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_KEY, 999999); - return new DecayRpcScheduler(priorityLevels, "ns", conf); + return new DecayRpcScheduler(priorityLevels, ns, conf); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java index e6a5f5e564c1f..1fed9a317642a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java @@ -104,6 +104,9 @@ public void testTotalCapacityOfSubQueues() { assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1025); fairCallQueue = new FairCallQueue(7, 1025, "ns", conf); assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1025); + fairCallQueue = new FairCallQueue(7, 1025, "ns", + new int[]{7, 6, 5, 4, 3, 2, 1}, conf); + assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1025); } @Test @@ -157,6 +160,61 @@ public int getAndAdvanceCurrentIndex() { assertNull(fcq.poll()); } + @Test + public void testQueueCapacity() { + int numQueues = 2; + int capacity = 4; + Configuration conf = new Configuration(); + List calls = new ArrayList<>(); + + // default weights i.e. 
all queues share capacity + fcq = new FairCallQueue(numQueues, 4, "ns", conf); + FairCallQueue fcq1 = new FairCallQueue( + numQueues, capacity, "ns", new int[]{1, 3}, conf); + + for (int i=0; i < capacity; i++) { + Schedulable call = mockCall("u", i%2); + calls.add(call); + fcq.add(call); + fcq1.add(call); + } + + final AtomicInteger currentIndex = new AtomicInteger(); + fcq.setMultiplexer(new RpcMultiplexer(){ + @Override + public int getAndAdvanceCurrentIndex() { + return currentIndex.get(); + } + }); + fcq1.setMultiplexer(new RpcMultiplexer(){ + @Override + public int getAndAdvanceCurrentIndex() { + return currentIndex.get(); + } + }); + + // either queue will have two calls + // v + // 0 1 + // 2 3 + currentIndex.set(1); + assertSame(calls.get(1), fcq.poll()); + assertSame(calls.get(3), fcq.poll()); + assertSame(calls.get(0), fcq.poll()); + assertSame(calls.get(2), fcq.poll()); + + // queues with different number of calls + // v + // 0 1 + // 2 + // 3 + currentIndex.set(1); + assertSame(calls.get(1), fcq1.poll()); + assertSame(calls.get(2), fcq1.poll()); + assertSame(calls.get(3), fcq1.poll()); + assertSame(calls.get(0), fcq1.poll()); + } + @SuppressWarnings("unchecked") @Test public void testInsertionWithFailover() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java index 76a93cf71b03c..dfb9e934f6055 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java @@ -39,9 +39,9 @@ import org.junit.Assert; import org.junit.Test; -import com.google.protobuf.BlockingService; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.BlockingService; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; public class TestProtoBufRPCCompatibility { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java index 3053f87511885..facb8fdd8b191 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.ipc; -import com.google.protobuf.BlockingService; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.BlockingService; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java index 476b1979b2a54..32300d4f876e1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java @@ -26,9 +26,9 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import com.google.protobuf.BlockingService; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.BlockingService; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.protobuf.TestProtos; import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcHandoffProto; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index 0da0b47529f99..640ca3d2b89ed 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ipc; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java index aee8893538330..39705b06c67c0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ipc; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java index 2f2d36f7b45d7..bf24d680dde2e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java @@ -18,9 +18,9 @@ package org.apache.hadoop.ipc; -import com.google.protobuf.BlockingService; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.BlockingService; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcWritable.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcWritable.java index 837f5797121eb..6beae7d12b4c7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcWritable.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcWritable.java @@ -29,7 +29,7 @@ import 
org.junit.Assert; import org.junit.Test; -import com.google.protobuf.Message; +import org.apache.hadoop.thirdparty.protobuf.Message; public class TestRpcWritable {//extends TestRpcBase { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 4f8a6d29f7244..72f73822b6fd0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ipc; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/metrics/TestDecayRpcSchedulerDetailedMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/metrics/TestDecayRpcSchedulerDetailedMetrics.java new file mode 100644 index 0000000000000..01d407ba26010 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/metrics/TestDecayRpcSchedulerDetailedMetrics.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ipc.metrics; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.DecayRpcScheduler; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.junit.Test; + +public class TestDecayRpcSchedulerDetailedMetrics { + + @Test + public void metricsRegistered() { + Configuration conf = new Configuration(); + DecayRpcScheduler scheduler = new DecayRpcScheduler(4, "ipc.8020", conf); + MetricsSystem metricsSystem = DefaultMetricsSystem.instance(); + DecayRpcSchedulerDetailedMetrics metrics = + scheduler.getDecayRpcSchedulerDetailedMetrics(); + + assertNotNull(metricsSystem.getSource(metrics.getName())); + + scheduler.stop(); + + assertNull(metricsSystem.getSource(metrics.getName())); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java index b5f62b189040e..5d20abdd8bf10 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java @@ -149,6 +149,19 @@ interface TestProtocol { assertGauge("BarAvgTime", 0.0, rb); } + @Test public void testMutableRatesWithAggregationInitWithArray() { + MetricsRecordBuilder rb = mockMetricsRecordBuilder(); + MutableRatesWithAggregation rates = new MutableRatesWithAggregation(); + + rates.init(new String[]{"Foo", "Bar"}); + rates.snapshot(rb, false); + + assertCounter("FooNumOps", 0L, rb); + assertGauge("FooAvgTime", 0.0, rb); + assertCounter("BarNumOps", 0L, rb); + assertGauge("BarAvgTime", 0.0, rb); + } + @Test public void testMutableRatesWithAggregationSingleThread() { MutableRatesWithAggregation rates = new MutableRatesWithAggregation(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java index 37a3a2affccfe..6fdd64dca7c30 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java @@ -18,6 +18,7 @@ package org.apache.hadoop.metrics2.source; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; import org.apache.hadoop.util.GcTimeMonitor; import org.junit.After; import org.junit.Assert; @@ -37,6 +38,7 @@ import org.apache.hadoop.util.JvmPauseMonitor; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import static org.apache.hadoop.metrics2.source.JvmMetricsInfo.*; @@ -65,7 +67,7 @@ public void testJvmPauseMonitorPresence() { pauseMonitor = new JvmPauseMonitor(); pauseMonitor.init(new Configuration()); pauseMonitor.start(); - JvmMetrics jvmMetrics = new JvmMetrics("test", "test"); + JvmMetrics jvmMetrics = new JvmMetrics("test", "test", false); jvmMetrics.setPauseMonitor(pauseMonitor); MetricsRecordBuilder rb = getMetrics(jvmMetrics); MetricsCollector mc = rb.parent(); @@ -91,7 +93,7 @@ public void testJvmPauseMonitorPresence() { public void testGcTimeMonitorPresence() { gcTimeMonitor = new GcTimeMonitor(60000, 1000, 70, null); gcTimeMonitor.start(); 
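The boolean added to the JvmMetrics constructor in the hunk above pairs with the getMetrics performance test introduced later in this patch, which compares reading thread statistics via ThreadMXBean against walking the root ThreadGroup. A minimal standalone sketch of those two sources of thread counts, assuming nothing about JvmMetrics internals beyond what the test names suggest:

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadMXBean;

    public class ThreadCountProbe {
      public static void main(String[] args) {
        // Source 1: ThreadMXBean, the JMX view of live threads (precise but
        // comparatively expensive when thousands of threads are running).
        ThreadMXBean mxBean = ManagementFactory.getThreadMXBean();
        System.out.println("ThreadMXBean count: " + mxBean.getThreadCount());

        // Source 2: the root ThreadGroup, a cheaper estimate of live threads.
        ThreadGroup root = Thread.currentThread().getThreadGroup();
        while (root.getParent() != null) {
          root = root.getParent();
        }
        System.out.println("ThreadGroup estimate: " + root.activeCount());
      }
    }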
- JvmMetrics jvmMetrics = new JvmMetrics("test", "test"); + JvmMetrics jvmMetrics = new JvmMetrics("test", "test", false); jvmMetrics.setGcTimeMonitor(gcTimeMonitor); MetricsRecordBuilder rb = getMetrics(jvmMetrics); MetricsCollector mc = rb.parent(); @@ -226,4 +228,89 @@ public void testJvmMetricsSingletonWithDifferentProcessNames() { Assert.assertEquals("unexpected process name of the singleton instance", process1Name, jvmMetrics2.processName); } + + /** + * Performance test for JvmMetrics#getMetrics, comparing performance of + * getting thread usage from ThreadMXBean with that from ThreadGroup. + */ + @Test + public void testGetMetricsPerf() { + JvmMetrics jvmMetricsUseMXBean = new JvmMetrics("test", "test", true); + JvmMetrics jvmMetrics = new JvmMetrics("test", "test", false); + MetricsCollectorImpl collector = new MetricsCollectorImpl(); + // warm up + jvmMetrics.getMetrics(collector, true); + jvmMetricsUseMXBean.getMetrics(collector, true); + // test cases with different numbers of threads + int[] numThreadsCases = {100, 200, 500, 1000, 2000, 3000}; + List threads = new ArrayList(); + for (int numThreads : numThreadsCases) { + updateThreadsAndWait(threads, numThreads); + long startNs = System.nanoTime(); + jvmMetricsUseMXBean.getMetrics(collector, true); + long processingNsFromMXBean = System.nanoTime() - startNs; + startNs = System.nanoTime(); + jvmMetrics.getMetrics(collector, true); + long processingNsFromGroup = System.nanoTime() - startNs; + System.out.println( + "#Threads=" + numThreads + ", ThreadMXBean=" + processingNsFromMXBean + + " ns, ThreadGroup=" + processingNsFromGroup + " ns, ratio: " + ( + processingNsFromMXBean / processingNsFromGroup)); + } + // cleanup + updateThreadsAndWait(threads, 0); + } + + private static void updateThreadsAndWait(List threads, + int expectedNumThreads) { + // add/remove threads according to expected number + int addNum = expectedNumThreads - threads.size(); + if (addNum > 0) { + for (int i = 0; i < addNum; i++) { + TestThread testThread = new TestThread(); + testThread.start(); + threads.add(testThread); + } + } else if (addNum < 0) { + for (int i = 0; i < Math.abs(addNum); i++) { + threads.get(i).exit = true; + } + } else { + return; + } + // wait for threads to reach the expected number + while (true) { + Iterator it = threads.iterator(); + while (it.hasNext()) { + if (it.next().exited) { + it.remove(); + } + } + if (threads.size() == expectedNumThreads) { + break; + } else { + try { + Thread.sleep(500); + } catch (InterruptedException e) { + //ignore + } + } + } + } + + static class TestThread extends Thread { + private volatile boolean exit = false; + private boolean exited = false; + @Override + public void run() { + while (!exit) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + exited = true; + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java index aa9370933722f..3e436f3a22f8c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java @@ -39,8 +39,8 @@ public class MockDomainNameResolver implements DomainNameResolver { public static final byte[] BYTE_ADDR_2 = new byte[]{10, 1, 1, 2}; public static final String ADDR_1 = "10.1.1.1"; public static final 
String ADDR_2 = "10.1.1.2"; - public static final String FQDN_1 = "host01.com"; - public static final String FQDN_2 = "host02.com"; + public static final String FQDN_1 = "host01.test"; + public static final String FQDN_2 = "host02.test"; /** Internal mapping of domain names and IP addresses. */ private Map addrs = new TreeMap<>(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java index fbed6052a5c03..328cf11c20fa6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java @@ -18,6 +18,7 @@ package org.apache.hadoop.net; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Arrays; @@ -193,6 +194,10 @@ public void testChooseRandom() { } assertEquals("Random is not selecting the nodes it should", 2, histogram.size()); + + Node val = cluster.chooseRandom("/d1", "/d", Collections.emptyList()); + assertNotNull(val); + } @Test @@ -229,6 +234,15 @@ public void testChooseRandomExcluded() { assertSame("node3", node.getName()); } + @Test + public void testNodeBaseNormalizeRemoveLeadingSlash() { + assertEquals("/d1", NodeBase.normalize("/d1///")); + assertEquals("/d1", NodeBase.normalize("/d1/")); + assertEquals("/d1", NodeBase.normalize("/d1")); + assertEquals("", NodeBase.normalize("///")); + assertEquals("", NodeBase.normalize("/")); + } + private NodeElement getNewNode(String name, String rackLocation) { NodeElement node = new NodeElement(name); node.setNetworkLocation(rackLocation); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java index b11b1e96ded59..76284932c43de 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java @@ -95,7 +95,25 @@ public void testAvoidLoopbackTcpSockets() throws Throwable { assertInException(se, "Invalid argument"); } } - + + @Test + public void testInvalidAddress() throws Throwable { + Configuration conf = new Configuration(); + + Socket socket = NetUtils.getDefaultSocketFactory(conf) + .createSocket(); + socket.bind(new InetSocketAddress("127.0.0.1", 0)); + try { + NetUtils.connect(socket, + new InetSocketAddress("invalid-test-host", + 0), 20000); + socket.close(); + fail("Should not have connected"); + } catch (UnknownHostException uhe) { + LOG.info("Got exception: ", uhe); + } + } + @Test public void testSocketReadTimeoutWithChannel() throws Exception { doSocketReadTimeoutTest(true); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java index f1c03cf5df470..76c74a37a0695 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java @@ -40,7 +40,7 @@ import static org.junit.Assert.*; /** - * This tests timout out from SocketInputStream and + * This tests timeout out from SocketInputStream and 
* SocketOutputStream using pipes. * * Normal read and write using these streams are tested by pretty much @@ -185,4 +185,42 @@ public void doWork() throws Exception { } } } + + @Test + public void testSocketIOWithTimeoutInterrupted() throws Exception { + Pipe pipe = Pipe.open(); + final int timeout = TIMEOUT * 10; + + try (Pipe.SourceChannel source = pipe.source(); + InputStream in = new SocketInputStream(source, timeout)) { + + TestingThread thread = new TestingThread(ctx) { + @Override + public void doWork() throws Exception { + try { + in.read(); + fail("Did not fail with interrupt"); + } catch (InterruptedIOException ste) { + String detail = ste.getMessage(); + String totalString = "Total timeout mills is " + timeout; + String leftString = "millis timeout left"; + + assertTrue(detail.contains(totalString)); + assertTrue(detail.contains(leftString)); + } + } + }; + + ctx.addThread(thread); + ctx.startThreads(); + // If the thread is interrupted before it calls read() + // then it throws ClosedByInterruptException due to + // some Java quirk. Waiting for it to call read() + // gets it into select(), so we get the expected + // InterruptedIOException. + Thread.sleep(1000); + thread.interrupt(); + ctx.stop(); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java index 3293903e6470b..c86b9ae344195 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.security; -import com.google.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.Text; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java index 4c471da4e8c35..46e9f92258502 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java @@ -22,6 +22,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; import java.util.Set; @@ -66,7 +67,7 @@ public void setup() throws IOException { public static class FakeGroupMapping extends ShellBasedUnixGroupsMapping { // any to n mapping - private static Set allGroups = new HashSet(); + private static Set allGroups = new LinkedHashSet(); private static Set blackList = new HashSet(); private static int requestCount = 0; private static long getGroupsDelayMs = 0; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java index 0d30e6e410be1..f027d3b39bec1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java @@ -68,6 +68,10 @@ public class KeyStoreTestUtil { + public final static String SERVER_KEY_STORE_PASSWORD_DEFAULT = "serverP"; + public final static String CLIENT_KEY_STORE_PASSWORD_DEFAULT = "clientP"; + public final static String TRUST_STORE_PASSWORD_DEFAULT = "trustP"; + public static String getClasspathDir(Class klass) throws Exception { String file = klass.getName(); file = file.replace('.', '/') + ".class"; @@ -257,30 +261,57 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir, setupSSLConfig(keystoresDir, sslConfDir, conf, useClientCert, true,""); } - /** - * Performs complete setup of SSL configuration in preparation for testing an - * SSLFactory. This includes keys, certs, keystores, truststores, the server - * SSL configuration file, the client SSL configuration file, and the master - * configuration file read by the SSLFactory. - * - * @param keystoresDir - * @param sslConfDir - * @param conf - * @param useClientCert - * @param trustStore - * @param excludeCiphers - * @throws Exception - */ - public static void setupSSLConfig(String keystoresDir, String sslConfDir, - Configuration conf, boolean useClientCert, - boolean trustStore, String excludeCiphers) - throws Exception { + /** + * Performs complete setup of SSL configuration in preparation for testing an + * SSLFactory. This includes keys, certs, keystores, truststores, the server + * SSL configuration file, the client SSL configuration file, and the master + * configuration file read by the SSLFactory. + * + * @param keystoresDir + * @param sslConfDir + * @param conf + * @param useClientCert + * @param trustStore + * @param excludeCiphers + * @throws Exception + */ + public static void setupSSLConfig(String keystoresDir, String sslConfDir, + Configuration conf, boolean useClientCert, boolean trustStore, + String excludeCiphers) throws Exception { + setupSSLConfig(keystoresDir, sslConfDir, conf, useClientCert, trustStore, + excludeCiphers, SERVER_KEY_STORE_PASSWORD_DEFAULT, + CLIENT_KEY_STORE_PASSWORD_DEFAULT, TRUST_STORE_PASSWORD_DEFAULT); + } + + + /** + * Performs complete setup of SSL configuration in preparation for testing an + * SSLFactory. This includes keys, certs, keystores, truststores, the server + * SSL configuration file, the client SSL configuration file, and the master + * configuration file read by the SSLFactory and the passwords required to + * access the keyStores (Server and Client KeyStore Passwords and + * TrustStore Password). 
+ * + * @param keystoresDir + * @param sslConfDir + * @param conf + * @param useClientCert + * @param trustStore + * @param excludeCiphers + * @param serverPassword + * @param clientPassword + * @param trustPassword + * @throws Exception + */ + @SuppressWarnings("checkstyle:parameternumber") + public static void setupSSLConfig(String keystoresDir, String sslConfDir, + Configuration conf, boolean useClientCert, boolean trustStore, + String excludeCiphers, String serverPassword, String clientPassword, + String trustPassword) throws Exception { + String clientKS = keystoresDir + "/clientKS.jks"; - String clientPassword = "clientP"; String serverKS = keystoresDir + "/serverKS.jks"; - String serverPassword = "serverP"; String trustKS = null; - String trustPassword = "trustP"; File sslClientConfFile = new File(sslConfDir, getClientSSLConfigFileName()); File sslServerConfFile = new File(sslConfDir, getServerSSLConfigFileName()); @@ -310,10 +341,10 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir, KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs); } - Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword, - clientPassword, trustKS, excludeCiphers); - Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword, - serverPassword, trustKS, excludeCiphers); + Configuration clientSSLConf = createClientSSLConfig(clientKS, + clientPassword, clientPassword, trustKS, trustPassword, excludeCiphers); + Configuration serverSSLConf = createServerSSLConfig(serverKS, + serverPassword, serverPassword, trustKS, trustPassword, excludeCiphers); saveConfig(sslClientConfFile, clientSSLConf); saveConfig(sslServerConfFile, serverSSLConf); @@ -336,9 +367,10 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir, * @return Configuration for client SSL */ public static Configuration createClientSSLConfig(String clientKS, - String password, String keyPassword, String trustKS) { + String password, String keyPassword, String trustKS, + String trustPassword) { return createSSLConfig(SSLFactory.Mode.CLIENT, - clientKS, password, keyPassword, trustKS, ""); + clientKS, password, keyPassword, trustKS, trustPassword, ""); } /** @@ -353,10 +385,11 @@ public static Configuration createClientSSLConfig(String clientKS, * @param excludeCiphers String comma separated ciphers to exclude * @return Configuration for client SSL */ - public static Configuration createClientSSLConfig(String clientKS, - String password, String keyPassword, String trustKS, String excludeCiphers) { + public static Configuration createClientSSLConfig(String clientKS, + String password, String keyPassword, String trustKS, + String trustPassword, String excludeCiphers) { return createSSLConfig(SSLFactory.Mode.CLIENT, - clientKS, password, keyPassword, trustKS, excludeCiphers); + clientKS, password, keyPassword, trustKS, trustPassword, excludeCiphers); } /** @@ -372,9 +405,10 @@ public static Configuration createClientSSLConfig(String clientKS, * @throws java.io.IOException */ public static Configuration createServerSSLConfig(String serverKS, - String password, String keyPassword, String trustKS) throws IOException { + String password, String keyPassword, String trustKS, String trustPassword) + throws IOException { return createSSLConfig(SSLFactory.Mode.SERVER, - serverKS, password, keyPassword, trustKS, ""); + serverKS, password, keyPassword, trustKS, trustPassword, ""); } /** @@ -390,10 +424,11 @@ public static Configuration createServerSSLConfig(String serverKS, 
* @return * @throws IOException */ - public static Configuration createServerSSLConfig(String serverKS, - String password, String keyPassword, String trustKS, String excludeCiphers) throws IOException { + public static Configuration createServerSSLConfig(String serverKS, + String password, String keyPassword, String trustKS, String trustPassword, + String excludeCiphers) throws IOException { return createSSLConfig(SSLFactory.Mode.SERVER, - serverKS, password, keyPassword, trustKS, excludeCiphers); + serverKS, password, keyPassword, trustKS, trustPassword, excludeCiphers); } /** @@ -445,8 +480,8 @@ private static String getSSLConfigFileName(String base) { * @return Configuration for SSL */ private static Configuration createSSLConfig(SSLFactory.Mode mode, - String keystore, String password, String keyPassword, String trustKS, String excludeCiphers) { - String trustPassword = "trustP"; + String keystore, String password, String keyPassword, String trustKS, + String trustStorePwd, String excludeCiphers) { Configuration sslConf = new Configuration(false); if (keystore != null) { @@ -466,10 +501,10 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode, sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS); } - if (trustPassword != null) { + if (trustStorePwd != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), - trustPassword); + trustStorePwd); } if(null != excludeCiphers && !excludeCiphers.isEmpty()) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java index 9f149b74277e9..9b4d1f205ff58 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.security.ssl; +import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.TRUST_STORE_PASSWORD_DEFAULT; import static org.junit.Assert.assertTrue; import org.apache.hadoop.conf.Configuration; @@ -407,7 +408,7 @@ private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode, String keystore = new File(KEYSTORES_DIR, "keystore.jks").getAbsolutePath(); String truststore = new File(KEYSTORES_DIR, "truststore.jks") .getAbsolutePath(); - String trustPassword = "trustP"; + String trustPassword = TRUST_STORE_PASSWORD_DEFAULT; // Create keys, certs, keystore, and truststore. 
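The widened KeyStoreTestUtil helpers above now take the trust-store password as an explicit argument instead of hard-coding "trustP". A hedged usage sketch of the new client-side overload, using the password constants this patch introduces (the keystore paths are illustrative, not taken from the patch):

    // Illustrative only: client SSL config with an explicit trust-store password.
    Configuration clientSslConf = KeyStoreTestUtil.createClientSSLConfig(
        "/tmp/test-keystores/clientKS.jks",                  // keystore path (made up)
        KeyStoreTestUtil.CLIENT_KEY_STORE_PASSWORD_DEFAULT,  // keystore password
        KeyStoreTestUtil.CLIENT_KEY_STORE_PASSWORD_DEFAULT,  // key password
        "/tmp/test-keystores/trustKS.jks",                   // truststore path (made up)
        KeyStoreTestUtil.TRUST_STORE_PASSWORD_DEFAULT);      // truststore password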
KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); @@ -433,7 +434,7 @@ private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode, if (mode == SSLFactory.Mode.SERVER) { sslConfFileName = "ssl-server.xml"; sslConf = KeyStoreTestUtil.createServerSSLConfig(keystore, confPassword, - confKeyPassword, truststore); + confKeyPassword, truststore, trustPassword); if (useCredProvider) { File testDir = GenericTestUtils.getTestDir(); final Path jksPath = new Path(testDir.toString(), "test.jks"); @@ -444,7 +445,7 @@ private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode, } else { sslConfFileName = "ssl-client.xml"; sslConf = KeyStoreTestUtil.createClientSSLConfig(keystore, confPassword, - confKeyPassword, truststore); + confKeyPassword, truststore, trustPassword); } KeyStoreTestUtil.saveConfig(new File(sslConfsDir, sslConfFileName), sslConf); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java index c9571ff21e847..b2e177976b6d5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java @@ -217,6 +217,58 @@ public void testNodeUpAferAWhile() throws Exception { } } + @SuppressWarnings("unchecked") + @Test + public void testMultiNodeCompeteForSeqNum() throws Exception { + DelegationTokenManager tm1, tm2 = null; + String connectString = zkServer.getConnectString(); + Configuration conf = getSecretConf(connectString); + conf.setInt( + ZKDelegationTokenSecretManager.ZK_DTSM_TOKEN_SEQNUM_BATCH_SIZE, 1000); + tm1 = new DelegationTokenManager(conf, new Text("bla")); + tm1.init(); + + Token token1 = + (Token) tm1.createToken( + UserGroupInformation.getCurrentUser(), "foo"); + Assert.assertNotNull(token1); + AbstractDelegationTokenIdentifier id1 = + tm1.getDelegationTokenSecretManager().decodeTokenIdentifier(token1); + Assert.assertEquals( + "Token seq should be the same", 1, id1.getSequenceNumber()); + Token token2 = + (Token) tm1.createToken( + UserGroupInformation.getCurrentUser(), "foo"); + Assert.assertNotNull(token2); + AbstractDelegationTokenIdentifier id2 = + tm1.getDelegationTokenSecretManager().decodeTokenIdentifier(token2); + Assert.assertEquals( + "Token seq should be the same", 2, id2.getSequenceNumber()); + + tm2 = new DelegationTokenManager(conf, new Text("bla")); + tm2.init(); + + Token token3 = + (Token) tm2.createToken( + UserGroupInformation.getCurrentUser(), "foo"); + Assert.assertNotNull(token3); + AbstractDelegationTokenIdentifier id3 = + tm2.getDelegationTokenSecretManager().decodeTokenIdentifier(token3); + Assert.assertEquals( + "Token seq should be the same", 1001, id3.getSequenceNumber()); + Token token4 = + (Token) tm2.createToken( + UserGroupInformation.getCurrentUser(), "foo"); + Assert.assertNotNull(token4); + AbstractDelegationTokenIdentifier id4 = + tm2.getDelegationTokenSecretManager().decodeTokenIdentifier(token4); + Assert.assertEquals( + "Token seq should be the same", 1002, id4.getSequenceNumber()); + + verifyDestroy(tm1, conf); + verifyDestroy(tm2, conf); + } + @SuppressWarnings("unchecked") @Test public void testRenewTokenSingleManager() throws Exception { diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AbstractHadoopTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AbstractHadoopTestBase.java new file mode 100644 index 0000000000000..e18119ccafcb8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AbstractHadoopTestBase.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import java.util.concurrent.TimeUnit; + +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TestName; +import org.junit.rules.Timeout; + +/** + * A base class for JUnit5+ tests that sets a default timeout for all tests + * that subclass this test. + * + * Threads are named to the method being executed, for ease of diagnostics + * in logs and thread dumps. + * + * Unlike {@link HadoopTestBase} this class does not extend JUnit Assert + * so is easier to use with AssertJ. + */ +public abstract class AbstractHadoopTestBase { + + /** + * System property name to set the test timeout: {@value}. + */ + public static final String PROPERTY_TEST_DEFAULT_TIMEOUT = + "test.default.timeout"; + + /** + * The default timeout (in milliseconds) if the system property + * {@link #PROPERTY_TEST_DEFAULT_TIMEOUT} + * is not set: {@value}. + */ + public static final int TEST_DEFAULT_TIMEOUT_VALUE = 100000; + + /** + * The JUnit rule that sets the default timeout for tests. + */ + @Rule + public Timeout defaultTimeout = retrieveTestTimeout(); + + /** + * Retrieve the test timeout from the system property + * {@link #PROPERTY_TEST_DEFAULT_TIMEOUT}, falling back to + * the value in {@link #TEST_DEFAULT_TIMEOUT_VALUE} if the + * property is not defined. + * @return the recommended timeout for tests + */ + public static Timeout retrieveTestTimeout() { + String propval = System.getProperty(PROPERTY_TEST_DEFAULT_TIMEOUT, + Integer.toString( + TEST_DEFAULT_TIMEOUT_VALUE)); + int millis; + try { + millis = Integer.parseInt(propval); + } catch (NumberFormatException e) { + //fall back to the default value, as the property cannot be parsed + millis = TEST_DEFAULT_TIMEOUT_VALUE; + } + return new Timeout(millis, TimeUnit.MILLISECONDS); + } + + /** + * The method name. + */ + @Rule + public TestName methodName = new TestName(); + + /** + * Get the method name; defaults to the value of {@link #methodName}. + * Subclasses may wish to override it, which will tune the thread naming. + * @return the name of the method. + */ + protected String getMethodName() { + return methodName.getMethodName(); + } + + /** + * Static initializer names this thread "JUnit". 
+ */ + @BeforeClass + public static void nameTestThread() { + Thread.currentThread().setName("JUnit"); + } + + /** + * Before each method, the thread is renamed to match the method name. + */ + @Before + public void nameThreadToMethod() { + Thread.currentThread().setName("JUnit-" + getMethodName()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index 0082452e514b5..9e91634873607 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -61,7 +61,6 @@ import org.mockito.stubbing.Answer; import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; import com.google.common.base.Supplier; import com.google.common.collect.Sets; @@ -378,11 +377,15 @@ public static void assertExceptionContains(String expectedText, * time * @throws InterruptedException if the method is interrupted while waiting */ - public static void waitFor(Supplier check, int checkEveryMillis, - int waitForMillis) throws TimeoutException, InterruptedException { - Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT); - Preconditions.checkArgument(waitForMillis >= checkEveryMillis, - ERROR_INVALID_ARGUMENT); + public static void waitFor(final Supplier check, + final long checkEveryMillis, final long waitForMillis) + throws TimeoutException, InterruptedException { + if (check == null) { + throw new NullPointerException(ERROR_MISSING_ARGUMENT); + } + if (waitForMillis < checkEveryMillis) { + throw new IllegalArgumentException(ERROR_INVALID_ARGUMENT); + } long st = Time.monotonicNow(); boolean result = check.get(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java index cb7df4b011a2f..2e34054d55322 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.test; +import java.util.concurrent.TimeUnit; + import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -59,7 +61,7 @@ public abstract class HadoopTestBase extends Assert { * property is not defined. 
* @return the recommended timeout for tests */ - public static Timeout retrieveTestTimeout() { + protected Timeout retrieveTestTimeout() { String propval = System.getProperty(PROPERTY_TEST_DEFAULT_TIMEOUT, Integer.toString( TEST_DEFAULT_TIMEOUT_VALUE)); @@ -70,7 +72,7 @@ public static Timeout retrieveTestTimeout() { //fall back to the default value, as the property cannot be parsed millis = TEST_DEFAULT_TIMEOUT_VALUE; } - return new Timeout(millis); + return new Timeout(millis, TimeUnit.MILLISECONDS); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java index db36154c158ac..ad265afc3a022 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java @@ -406,7 +406,7 @@ public static E intercept( throws Exception { try { eval.call(); - throw new AssertionError("Expected an exception"); + throw new AssertionError("Expected an exception of type " + clazz); } catch (Throwable e) { if (clazz.isAssignableFrom(e.getClass())) { return (E)e; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java index 1b17ce7cc9d8b..05d66d39f56ee 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java @@ -29,17 +29,19 @@ @InterfaceAudience.Private @InterfaceStability.Unstable public class FakeTimer extends Timer { + private long now; private long nowNanos; /** Constructs a FakeTimer with a non-zero value */ public FakeTimer() { // Initialize with a non-trivial value. + now = 1577836800000L; // 2020-01-01 00:00:00,000+0000 nowNanos = TimeUnit.MILLISECONDS.toNanos(1000); } @Override public long now() { - return TimeUnit.NANOSECONDS.toMillis(nowNanos); + return now; } @Override @@ -54,6 +56,7 @@ public long monotonicNowNanos() { /** Increases the time by milliseconds */ public void advance(long advMillis) { + now += advMillis; nowNanos += TimeUnit.MILLISECONDS.toNanos(advMillis); } @@ -62,6 +65,7 @@ public void advance(long advMillis) { * @param advNanos Nanoseconds to advance by. 
*/ public void advanceNanos(long advNanos) { + now += TimeUnit.NANOSECONDS.toMillis(advNanos); nowNanos += advNanos; } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDurationInfo.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDurationInfo.java index d1fa70319eb84..b6abde8762902 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDurationInfo.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDurationInfo.java @@ -35,6 +35,14 @@ public void testDurationInfoCreation() throws Exception { Thread.sleep(1000); info.finished(); Assert.assertTrue(info.value() > 0); + + info = new DurationInfo(log, true, "test format %s", "value"); + Assert.assertEquals("test format value: duration 0:00.000s", + info.toString()); + + info = new DurationInfo(log, false, "test format %s", "value"); + Assert.assertEquals("test format value: duration 0:00.000s", + info.toString()); } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java index fd9966feb064e..60dda981d4a1a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java @@ -19,6 +19,7 @@ import java.io.File; import java.io.FileWriter; +import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.Map; @@ -347,4 +348,62 @@ public void testHostFileReaderWithTimeout() throws Exception { assertTrue(excludes.get("host5") == 1800); assertTrue(excludes.get("host6") == 1800); } -} + + @Test + public void testLazyRefresh() throws IOException { + FileWriter efw = new FileWriter(excludesFile); + FileWriter ifw = new FileWriter(includesFile); + + efw.write("host1\n"); + efw.write("host2\n"); + efw.close(); + ifw.write("host3\n"); + ifw.write("host4\n"); + ifw.close(); + + HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile); + + ifw = new FileWriter(includesFile); + ifw.close(); + + efw = new FileWriter(excludesFile, true); + efw.write("host3\n"); + efw.write("host4\n"); + efw.close(); + + hfp.lazyRefresh(includesFile, excludesFile); + + HostDetails details = hfp.getHostDetails(); + HostDetails lazyDetails = hfp.getLazyLoadedHostDetails(); + + assertEquals("Details: no. of excluded hosts", 2, + details.getExcludedHosts().size()); + assertEquals("Details: no. of included hosts", 2, + details.getIncludedHosts().size()); + assertEquals("LazyDetails: no. of excluded hosts", 4, + lazyDetails.getExcludedHosts().size()); + assertEquals("LayDetails: no. of included hosts", 0, + lazyDetails.getIncludedHosts().size()); + + hfp.finishRefresh(); + + details = hfp.getHostDetails(); + assertEquals("Details: no. of excluded hosts", 4, + details.getExcludedHosts().size()); + assertEquals("Details: no. 
of included hosts", 0, + details.getIncludedHosts().size()); + assertNull("Lazy host details should be null", + hfp.getLazyLoadedHostDetails()); + } + + @Test(expected = IllegalStateException.class) + public void testFinishRefreshWithoutLazyRefresh() throws IOException { + FileWriter efw = new FileWriter(excludesFile); + FileWriter ifw = new FileWriter(includesFile); + efw.close(); + ifw.close(); + + HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile); + hfp.finishRefresh(); + } +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java index 44158ec5b0f18..c47ff0712d201 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.util; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -117,12 +119,14 @@ public long monotonicNow() { final AtomicLong wlogged = new AtomicLong(0); final AtomicLong wsuppresed = new AtomicLong(0); + final AtomicLong wMaxWait = new AtomicLong(0); InstrumentedLock lock = new InstrumentedLock( testname, LOG, mlock, 2000, 300, mclock) { @Override - void logWarning(long lockHeldTime, long suppressed) { + void logWarning(long lockHeldTime, SuppressedSnapshot stats) { wlogged.incrementAndGet(); - wsuppresed.set(suppressed); + wsuppresed.set(stats.getSuppressedCount()); + wMaxWait.set(stats.getMaxSuppressedWait()); } }; @@ -132,12 +136,14 @@ void logWarning(long lockHeldTime, long suppressed) { lock.unlock(); // t = 200 assertEquals(0, wlogged.get()); assertEquals(0, wsuppresed.get()); + assertEquals(0, wMaxWait.get()); lock.lock(); // t = 200 time.set(700); lock.unlock(); // t = 700 assertEquals(1, wlogged.get()); assertEquals(0, wsuppresed.get()); + assertEquals(0, wMaxWait.get()); // despite the lock held time is greater than threshold // suppress the log warning due to the logging gap @@ -147,6 +153,7 @@ void logWarning(long lockHeldTime, long suppressed) { lock.unlock(); // t = 1100 assertEquals(1, wlogged.get()); assertEquals(0, wsuppresed.get()); + assertEquals(0, wMaxWait.get()); // log a warning message when the lock held time is greater the threshold // and the logging time gap is satisfied. Also should display suppressed @@ -157,6 +164,106 @@ void logWarning(long lockHeldTime, long suppressed) { lock.unlock(); // t = 2800 assertEquals(2, wlogged.get()); assertEquals(1, wsuppresed.get()); + assertEquals(400, wMaxWait.get()); + } + + /** + * Test the lock logs warning when lock wait / queue time is greater than + * threshold and not log warning otherwise. 
+ * @throws Exception + */ + @Test(timeout=10000) + public void testLockLongWaitReport() throws Exception { + String testname = name.getMethodName(); + final AtomicLong time = new AtomicLong(0); + Timer mclock = new Timer() { + @Override + public long monotonicNow() { + return time.get(); + } + }; + Lock mlock = new ReentrantLock(true); //mock(Lock.class); + + final AtomicLong wlogged = new AtomicLong(0); + final AtomicLong wsuppresed = new AtomicLong(0); + final AtomicLong wMaxWait = new AtomicLong(0); + InstrumentedLock lock = new InstrumentedLock( + testname, LOG, mlock, 2000, 300, mclock) { + @Override + void logWaitWarning(long lockHeldTime, SuppressedSnapshot stats) { + wlogged.incrementAndGet(); + wsuppresed.set(stats.getSuppressedCount()); + wMaxWait.set(stats.getMaxSuppressedWait()); + } + }; + + // do not log warning when the lock held time is short + lock.lock(); // t = 0 + + Thread competingThread = lockUnlockThread(lock); + time.set(200); + lock.unlock(); // t = 200 + competingThread.join(); + assertEquals(0, wlogged.get()); + assertEquals(0, wsuppresed.get()); + assertEquals(0, wMaxWait.get()); + + + lock.lock(); // t = 200 + competingThread = lockUnlockThread(lock); + time.set(700); + lock.unlock(); // t = 700 + competingThread.join(); + + // The competing thread will have waited for 500ms, so it should log + assertEquals(1, wlogged.get()); + assertEquals(0, wsuppresed.get()); + assertEquals(0, wMaxWait.get()); + + // despite the lock wait time is greater than threshold + // suppress the log warning due to the logging gap + // (not recorded in wsuppressed until next log message) + lock.lock(); // t = 700 + competingThread = lockUnlockThread(lock); + time.set(1100); + lock.unlock(); // t = 1100 + competingThread.join(); + assertEquals(1, wlogged.get()); + assertEquals(0, wsuppresed.get()); + assertEquals(0, wMaxWait.get()); + + // log a warning message when the lock held time is greater the threshold + // and the logging time gap is satisfied. Also should display suppressed + // previous warnings. + time.set(2400); + lock.lock(); // t = 2400 + competingThread = lockUnlockThread(lock); + time.set(2800); + lock.unlock(); // t = 2800 + competingThread.join(); + assertEquals(2, wlogged.get()); + assertEquals(1, wsuppresed.get()); + assertEquals(400, wMaxWait.get()); + } + + private Thread lockUnlockThread(Lock lock) throws InterruptedException { + CountDownLatch countDownLatch = new CountDownLatch(1); + Thread t = new Thread(() -> { + try { + assertFalse(lock.tryLock()); + countDownLatch.countDown(); + lock.lock(); + } finally { + lock.unlock(); + } + }); + t.start(); + countDownLatch.await(); + // Even with the countdown latch, the main thread releases the lock + // before this thread actually starts waiting on it, so introducing a + // short sleep so the competing thread can block on the lock as intended. 
+ Thread.sleep(3); + return t; } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java index 3e1a88bd0ad22..1ea3ef1860860 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java @@ -146,9 +146,10 @@ public long monotonicNow() { InstrumentedReadLock readLock = new InstrumentedReadLock(testname, LOG, readWriteLock, 2000, 300, mclock) { @Override - protected void logWarning(long lockHeldTime, long suppressed) { + protected void logWarning( + long lockHeldTime, SuppressedSnapshot stats) { wlogged.incrementAndGet(); - wsuppresed.set(suppressed); + wsuppresed.set(stats.getSuppressedCount()); } }; @@ -200,9 +201,9 @@ public long monotonicNow() { InstrumentedWriteLock writeLock = new InstrumentedWriteLock(testname, LOG, readWriteLock, 2000, 300, mclock) { @Override - protected void logWarning(long lockHeldTime, long suppressed) { + protected void logWarning(long lockHeldTime, SuppressedSnapshot stats) { wlogged.incrementAndGet(); - wsuppresed.set(suppressed); + wsuppresed.set(stats.getSuppressedCount()); } }; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java deleted file mode 100644 index 2748c0b581a88..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java +++ /dev/null @@ -1,145 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.util; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.TimerTask; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileContext; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -public class TestNodeHealthScriptRunner { - - protected static File testRootDir = new File("target", - TestNodeHealthScriptRunner.class.getName() + - "-localDir").getAbsoluteFile(); - - private File nodeHealthscriptFile = new File(testRootDir, - Shell.appendScriptExtension("failingscript")); - - @Before - public void setup() { - testRootDir.mkdirs(); - } - - @After - public void tearDown() throws Exception { - if (testRootDir.exists()) { - FileContext.getLocalFSFileContext().delete( - new Path(testRootDir.getAbsolutePath()), true); - } - } - - private void writeNodeHealthScriptFile(String scriptStr, boolean setExecutable) - throws IOException { - PrintWriter pw = null; - try { - FileUtil.setWritable(nodeHealthscriptFile, true); - FileUtil.setReadable(nodeHealthscriptFile, true); - pw = new PrintWriter(new FileOutputStream(nodeHealthscriptFile)); - pw.println(scriptStr); - pw.flush(); - } finally { - pw.close(); - } - FileUtil.setExecutable(nodeHealthscriptFile, setExecutable); - } - - @Test - public void testNodeHealthScriptShouldRun() throws IOException { - Assert.assertFalse("Node health script should start", - NodeHealthScriptRunner.shouldRun( - nodeHealthscriptFile.getAbsolutePath())); - writeNodeHealthScriptFile("", false); - // Node health script should not start if the node health script is not - // executable. - Assert.assertFalse("Node health script should start", - NodeHealthScriptRunner.shouldRun( - nodeHealthscriptFile.getAbsolutePath())); - writeNodeHealthScriptFile("", true); - Assert.assertTrue("Node health script should start", - NodeHealthScriptRunner.shouldRun( - nodeHealthscriptFile.getAbsolutePath())); - } - - @Test - public void testNodeHealthScript() throws Exception { - String errorScript = "echo ERROR\n echo \"Tracker not healthy\""; - String normalScript = "echo \"I am all fine\""; - String timeOutScript = - Shell.WINDOWS ? "@echo off\nping -n 4 127.0.0.1 >nul\necho \"I am fine\"" - : "sleep 4\necho \"I am fine\""; - String exitCodeScript = "exit 127"; - - Configuration conf = new Configuration(); - writeNodeHealthScriptFile(normalScript, true); - NodeHealthScriptRunner nodeHealthScriptRunner = new NodeHealthScriptRunner( - nodeHealthscriptFile.getAbsolutePath(), - 500, 1000, new String[] {}); - nodeHealthScriptRunner.init(conf); - TimerTask timerTask = nodeHealthScriptRunner.getTimerTask(); - - timerTask.run(); - // Normal Script runs successfully - Assert.assertTrue("Node health status reported unhealthy", - nodeHealthScriptRunner.isHealthy()); - Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport()); - - // Error script. - writeNodeHealthScriptFile(errorScript, true); - // Run timer - timerTask.run(); - Assert.assertFalse("Node health status reported healthy", - nodeHealthScriptRunner.isHealthy()); - Assert.assertTrue( - nodeHealthScriptRunner.getHealthReport().contains("ERROR")); - - // Healthy script. 
- writeNodeHealthScriptFile(normalScript, true); - timerTask.run(); - Assert.assertTrue("Node health status reported unhealthy", - nodeHealthScriptRunner.isHealthy()); - Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport()); - - // Timeout script. - writeNodeHealthScriptFile(timeOutScript, true); - timerTask.run(); - Assert.assertFalse("Node health status reported healthy even after timeout", - nodeHealthScriptRunner.isHealthy()); - Assert.assertEquals( - NodeHealthScriptRunner.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG, - nodeHealthScriptRunner.getHealthReport()); - - // Exit code 127 - writeNodeHealthScriptFile(exitCodeScript, true); - timerTask.run(); - Assert.assertTrue("Node health status reported unhealthy", - nodeHealthScriptRunner.isHealthy()); - Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport()); - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java index 6b72089faab84..4792fd49b98cf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java @@ -33,7 +33,7 @@ import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto; import org.junit.Test; -import com.google.protobuf.CodedOutputStream; +import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream; public class TestProtoUtil { diff --git a/hadoop-common-project/hadoop-common/src/test/resources/contract/sftp.xml b/hadoop-common-project/hadoop-common/src/test/resources/contract/sftp.xml new file mode 100644 index 0000000000000..20a24b7e54061 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/resources/contract/sftp.xml @@ -0,0 +1,79 @@ + + + + + + + fs.contract.test.root-tests-enabled + false + + + + fs.contract.is-case-sensitive + true + + + + fs.contract.supports-append + false + + + + fs.contract.supports-atomic-directory-delete + true + + + + fs.contract.supports-atomic-rename + true + + + + fs.contract.supports-block-locality + false + + + + fs.contract.supports-concat + false + + + + fs.contract.supports-seek + true + + + + fs.contract.rejects-seek-past-eof + true + + + + fs.contract.supports-strict-exceptions + true + + + + fs.contract.supports-unix-permissions + false + + + diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml index 392d39170d5fe..cbc50b9d1c683 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml +++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml @@ -282,7 +282,7 @@ RegexpComparator - ^-count \[-q\] \[-h\] \[-v\] \[-t \[<storage type>\]\] \[-u\] \[-x\] \[-e\] <path> \.\.\. :( )* + ^-count \[-q\] \[-h\] \[-v\] \[-t \[<storage type>\]\] \[-u\] \[-x\] \[-e\] \[-s\] <path> \.\.\. :( )* RegexpComparator @@ -496,7 +496,10 @@ RegexpComparator - ^-put \[-f\] \[-p\] \[-l\] \[-d\] <localsrc> \.\.\. <dst> :( )* + + RegexpComparator + ^-put \[-f\] \[-p\] \[-l\] \[-d\] \[-t <thread count>\] <localsrc> \.\.\. 
<dst> :\s* + RegexpComparator @@ -512,15 +515,19 @@ RegexpComparator - ^\s*-p Preserves access and modification times, ownership and the mode.( )* + ^\s*-p Preserves timestamps, ownership and the mode.( )* + + + RegexpComparator + ^\s*-f Overwrites the destination if it already exists.( )* RegexpComparator - ^\s*-f Overwrites the destination if it already exists.( )* + ^\s*-t <thread count> Number of threads to be used, default is 1.( )* RegexpComparator - ^\s*-l Allow DataNode to lazily persist the file to disk. Forces( )* + ^\s*-l Allow DataNode to lazily persist the file to disk. Forces( )* RegexpComparator @@ -532,7 +539,7 @@ RegexpComparator - ^\s*-d Skip creation of temporary file\(<dst>\._COPYING_\).( )* + ^\s*-d Skip creation of temporary file\(<dst>\._COPYING_\).( )* @@ -551,47 +558,7 @@ RegexpComparator - ^\s*Copy files from the local file system into fs.( )*Copying fails if the file already( )* - - - RegexpComparator - ^\s*exists, unless the -f flag is given.( )* - - - RegexpComparator - ^\s*Flags:( )* - - - RegexpComparator - ^\s*-p Preserves access and modification times, ownership and the( )* - - - RegexpComparator - ^\s*mode.( )* - - - RegexpComparator - ^\s*-f Overwrites the destination if it already exists.( )* - - - RegexpComparator - ^\s*-t <thread count> Number of threads to be used, default is 1.( )* - - - RegexpComparator - ^\s*-l Allow DataNode to lazily persist the file to disk. Forces( )* - - - RegexpComparator - ^\s*replication factor of 1. This flag will result in reduced( )* - - - RegexpComparator - ^\s*durability. Use with care.( )* - - - RegexpComparator - ^\s*-d Skip creation of temporary file\(<dst>\._COPYING_\).( )* + ^\s*Identical to the -put command\.\s* @@ -606,11 +573,14 @@ RegexpComparator - ^-moveFromLocal <localsrc> \.\.\. <dst> :\s* + ^-moveFromLocal \[-f\] \[-p\] \[-l\] \[-d\] <localsrc> \.\.\. <dst> :\s* RegexpComparator - ^( |\t)*Same as -put, except that the source is deleted after it's copied. + ^( |\t)*Same as -put, except that the source is deleted after it's copied + + RegexpComparator + ^\s* and -t option has not yet implemented. 
diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index 6f4ff09952ac4..acfdeeac50b0a 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -22,11 +22,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-kms - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT jar Apache Hadoop KMS @@ -186,6 +186,7 @@ org.apache.maven.plugins maven-surefire-plugin + ${ignoreTestFailure} 1 false 1 diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java index 0640e25b76c4b..da597b4da5f81 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java @@ -37,6 +37,7 @@ import org.apache.hadoop.util.VersionInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.slf4j.bridge.SLF4JBridgeHandler; @InterfaceAudience.Private public class KMSWebApp implements ServletContextListener { @@ -80,6 +81,11 @@ public class KMSWebApp implements ServletContextListener { private static KMSAudit kmsAudit; private static KeyProviderCryptoExtension keyProviderCryptoExtension; + static { + SLF4JBridgeHandler.removeHandlersForRootLogger(); + SLF4JBridgeHandler.install(); + } + @Override public void contextInitialized(ServletContextEvent sce) { try { diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java index 036231de70da2..639d85521c3ce 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java @@ -22,13 +22,16 @@ import java.net.MalformedURLException; import java.net.URI; import java.net.URL; +import java.util.LinkedHashSet; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.ConfigurationWithLogging; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; +import org.apache.hadoop.security.AuthenticationFilterInitializer; +import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.JvmPauseMonitor; @@ -95,6 +98,22 @@ public class KMSWebServer { KMSConfiguration.HTTP_PORT_DEFAULT); URI endpoint = new URI(scheme, null, host, port, null, null, null); + String configuredInitializers = + conf.get(HttpServer2.FILTER_INITIALIZER_PROPERTY); + if (configuredInitializers != null) { + Set target = new LinkedHashSet(); + String[] initializers = configuredInitializers.split(","); + for (String init : initializers) { + if (!init.equals(AuthenticationFilterInitializer.class.getName()) && + !init.equals( + ProxyUserAuthenticationFilterInitializer.class.getName())) { + target.add(init); + } + } + String actualInitializers = StringUtils.join(",", target); + 
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, actualInitializers); + } + httpServer = new HttpServer2.Builder() .setName(NAME) .setConf(conf) @@ -168,10 +187,8 @@ public URL getKMSUrl() { public static void main(String[] args) throws Exception { KMSConfiguration.initLogging(); StringUtils.startupShutdownMessage(KMSWebServer.class, args, LOG); - Configuration conf = new ConfigurationWithLogging( - KMSConfiguration.getKMSConf()); - Configuration sslConf = new ConfigurationWithLogging( - SSLFactory.readSSLConfiguration(conf, SSLFactory.Mode.SERVER)); + Configuration conf = KMSConfiguration.getKMSConf(); + Configuration sslConf = SSLFactory.readSSLConfiguration(conf, SSLFactory.Mode.SERVER); KMSWebServer kmsWebServer = new KMSWebServer(conf, sslConf); kmsWebServer.start(); kmsWebServer.join(); diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index e37f2753d1818..9190df27ccc2c 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -38,6 +38,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -614,7 +615,18 @@ public Void run() throws Exception { @Test public void testStartStopHttpPseudo() throws Exception { - testStartStop(false, false); + // Make sure bogus errors don't get emitted. 
+ GenericTestUtils.LogCapturer logs = + GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger( + "com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator")); + try { + testStartStop(false, false); + } finally { + logs.stopCapturing(); + } + assertFalse(logs.getOutput().contains( + "Couldn't find grammar element for class")); + } @Test @@ -3068,4 +3080,45 @@ public Void call() throws Exception { } }); } + + @Test + public void testFilterInitializer() throws Exception { + Configuration conf = new Configuration(); + File testDir = getTestDir(); + conf = createBaseKMSConf(testDir, conf); + conf.set("hadoop.security.authentication", "kerberos"); + conf.set("hadoop.kms.authentication.token.validity", "1"); + conf.set("hadoop.kms.authentication.type", "kerberos"); + conf.set("hadoop.kms.authentication.kerberos.keytab", + keytab.getAbsolutePath()); + conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost"); + conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT"); + conf.set("hadoop.http.filter.initializers", + AuthenticationFilterInitializer.class.getName()); + conf.set("hadoop.http.authentication.type", "kerberos"); + conf.set("hadoop.http.authentication.kerberos.principal", "HTTP/localhost"); + conf.set("hadoop.http.authentication.kerberos.keytab", + keytab.getAbsolutePath()); + + writeConf(testDir, conf); + + runServer(null, null, testDir, new KMSCallable() { + @Override + public Void call() throws Exception { + final Configuration conf = new Configuration(); + URL url = getKMSUrl(); + final URI uri = createKMSUri(getKMSUrl()); + + doAs("client", new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + final KeyProvider kp = createProvider(uri, conf); + Assert.assertTrue(kp.getKeys().isEmpty()); + return null; + } + }); + return null; + } + }); + } } diff --git a/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties b/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties index b8e6353b393f9..73c48534a0a01 100644 --- a/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties +++ b/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties @@ -26,6 +26,7 @@ log4j.rootLogger=INFO, stdout log4j.logger.org.apache.hadoop.conf=ERROR log4j.logger.org.apache.hadoop.crytpo.key.kms.server=ALL log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF +log4j.logger.com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator=OFF log4j.logger.org.apache.hadoop.security=OFF log4j.logger.org.apache.directory.server.core=OFF log4j.logger.org.apache.hadoop.util.NativeCodeLoader=OFF diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index adbd6e32bee58..c76abf750b78d 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -18,12 +18,12 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project 4.0.0 hadoop-minikdc - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT Apache Hadoop MiniKDC Apache Hadoop MiniKDC jar diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml index e0fedaf1434e6..22c56722d1c42 100644 --- a/hadoop-common-project/hadoop-nfs/pom.xml +++ b/hadoop-common-project/hadoop-nfs/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project hadoop-nfs - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT jar Apache 
Hadoop NFS diff --git a/hadoop-common-project/hadoop-registry/pom.xml b/hadoop-common-project/hadoop-registry/pom.xml index dc45309dca296..d5e0150bba94c 100644 --- a/hadoop-common-project/hadoop-registry/pom.xml +++ b/hadoop-common-project/hadoop-registry/pom.xml @@ -19,12 +19,12 @@ hadoop-project org.apache.hadoop - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../../hadoop-project 4.0.0 hadoop-registry - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT Apache Hadoop Registry @@ -221,6 +221,7 @@ org.apache.maven.plugins maven-surefire-plugin + ${ignoreTestFailure} false 900 -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index 8be2593c21ffd..b36dbf30610ff 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../hadoop-project hadoop-common-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT Apache Hadoop Common Project Apache Hadoop Common Project pom diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index 07aa7b10a8320..0a5db2565b8c5 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT ../hadoop-project hadoop-dist - 3.3.0-SNAPSHOT + 3.4.0-SNAPSHOT Apache Hadoop Distribution Apache Hadoop Distribution jar diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml deleted file mode 100644 index 673af41aeef0d..0000000000000 --- a/hadoop-hdds/client/pom.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - - hadoop-hdds-client - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Client Library - Apache Hadoop HDDS Client - jar - - - - org.apache.hadoop - hadoop-hdds-common - - - - io.netty - netty-all - - - - diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java deleted file mode 100644 index 7a15808b2ea72..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm; - -import org.apache.ratis.thirdparty.io.grpc.CallOptions; -import org.apache.ratis.thirdparty.io.grpc.Channel; -import org.apache.ratis.thirdparty.io.grpc.ClientCall; -import org.apache.ratis.thirdparty.io.grpc.ClientInterceptor; -import org.apache.ratis.thirdparty.io.grpc.ForwardingClientCall; -import org.apache.ratis.thirdparty.io.grpc.Metadata; -import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor; - -import static org.apache.hadoop.ozone.OzoneConsts.OBT_METADATA_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.USER_METADATA_KEY; - -/** - * GRPC client interceptor for ozone block token. - */ -public class ClientCredentialInterceptor implements ClientInterceptor { - - private final String user; - private final String token; - - public ClientCredentialInterceptor(String user, String token) { - this.user = user; - this.token = token; - } - - @Override - public ClientCall interceptCall( - MethodDescriptor method, - CallOptions callOptions, - Channel next) { - - return new ForwardingClientCall.SimpleForwardingClientCall( - next.newCall(method, callOptions)) { - @Override - public void start(Listener responseListener, Metadata headers) { - if (token != null) { - headers.put(OBT_METADATA_KEY, token); - } - if (user != null) { - headers.put(USER_METADATA_KEY, user); - } - super.start(responseListener, headers); - } - }; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java deleted file mode 100644 index 04a8a1aaa1db3..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ /dev/null @@ -1,466 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc; -import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.tracing.GrpcClientInterceptor; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Time; - -import io.opentracing.Scope; -import io.opentracing.util.GlobalTracer; -import org.apache.ratis.thirdparty.io.grpc.ManagedChannel; -import org.apache.ratis.thirdparty.io.grpc.Status; -import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts; -import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder; -import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; -import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.security.cert.X509Certificate; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * A Client for the storageContainer protocol for read object data. - */ -public class XceiverClientGrpc extends XceiverClientSpi { - static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class); - private static final String COMPONENT = "dn"; - private final Pipeline pipeline; - private final Configuration config; - private Map asyncStubs; - private XceiverClientMetrics metrics; - private Map channels; - private final Semaphore semaphore; - private boolean closed = false; - private SecurityConfig secConfig; - private final boolean topologyAwareRead; - private X509Certificate caCert; - - /** - * Constructs a client that can communicate with the Container framework on - * data nodes. - * - * @param pipeline - Pipeline that defines the machines. - * @param config -- Ozone Config - * @param caCert - SCM ca certificate. 
- */ - public XceiverClientGrpc(Pipeline pipeline, Configuration config, - X509Certificate caCert) { - super(); - Preconditions.checkNotNull(pipeline); - Preconditions.checkNotNull(config); - this.pipeline = pipeline; - this.config = config; - this.secConfig = new SecurityConfig(config); - this.semaphore = - new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config)); - this.metrics = XceiverClientManager.getXceiverClientMetrics(); - this.channels = new HashMap<>(); - this.asyncStubs = new HashMap<>(); - this.topologyAwareRead = config.getBoolean( - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT); - this.caCert = caCert; - } - - /** - * Constructs a client that can communicate with the Container framework on - * data nodes. - * - * @param pipeline - Pipeline that defines the machines. - * @param config -- Ozone Config - */ - public XceiverClientGrpc(Pipeline pipeline, Configuration config) { - this(pipeline, config, null); - } - - /** - * To be used when grpc token is not enabled. - */ - @Override - public void connect() throws Exception { - // connect to the closest node, if closest node doesn't exist, delegate to - // first node, which is usually the leader in the pipeline. - DatanodeDetails dn = topologyAwareRead ? this.pipeline.getClosestNode() : - this.pipeline.getFirstNode(); - // just make a connection to the picked datanode at the beginning - connectToDatanode(dn, null); - } - - /** - * Passed encoded token to GRPC header when security is enabled. - */ - @Override - public void connect(String encodedToken) throws Exception { - // connect to the closest node, if closest node doesn't exist, delegate to - // first node, which is usually the leader in the pipeline. - DatanodeDetails dn = topologyAwareRead ? this.pipeline.getClosestNode() : - this.pipeline.getFirstNode(); - // just make a connection to the picked datanode at the beginning - connectToDatanode(dn, encodedToken); - } - - private void connectToDatanode(DatanodeDetails dn, String encodedToken) - throws IOException { - // read port from the data node, on failure use default configured - // port. - int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); - if (port == 0) { - port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - } - - // Add credential context to the client call - String userName = UserGroupInformation.getCurrentUser().getShortUserName(); - if (LOG.isDebugEnabled()) { - LOG.debug("Nodes in pipeline : {}", pipeline.getNodes().toString()); - LOG.debug("Connecting to server : {}", dn.getIpAddress()); - } - NettyChannelBuilder channelBuilder = - NettyChannelBuilder.forAddress(dn.getIpAddress(), port).usePlaintext() - .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) - .intercept(new ClientCredentialInterceptor(userName, encodedToken), - new GrpcClientInterceptor()); - if (secConfig.isGrpcTlsEnabled()) { - SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient(); - if (caCert != null) { - sslContextBuilder.trustManager(caCert); - } - if (secConfig.useTestCert()) { - channelBuilder.overrideAuthority("localhost"); - } - channelBuilder.useTransportSecurity(). 
- sslContext(sslContextBuilder.build()); - } else { - channelBuilder.usePlaintext(); - } - ManagedChannel channel = channelBuilder.build(); - XceiverClientProtocolServiceStub asyncStub = - XceiverClientProtocolServiceGrpc.newStub(channel); - asyncStubs.put(dn.getUuid(), asyncStub); - channels.put(dn.getUuid(), channel); - } - - /** - * Returns if the xceiver client connects to all servers in the pipeline. - * - * @return True if the connection is alive, false otherwise. - */ - @VisibleForTesting - public boolean isConnected(DatanodeDetails details) { - return isConnected(channels.get(details.getUuid())); - } - - private boolean isConnected(ManagedChannel channel) { - return channel != null && !channel.isTerminated() && !channel.isShutdown(); - } - - @Override - public void close() { - closed = true; - for (ManagedChannel channel : channels.values()) { - channel.shutdownNow(); - try { - channel.awaitTermination(60, TimeUnit.MINUTES); - } catch (Exception e) { - LOG.error("Unexpected exception while waiting for channel termination", - e); - } - } - } - - @Override - public Pipeline getPipeline() { - return pipeline; - } - - @Override - public ContainerCommandResponseProto sendCommand( - ContainerCommandRequestProto request) throws IOException { - try { - XceiverClientReply reply; - reply = sendCommandWithTraceIDAndRetry(request, null); - ContainerCommandResponseProto responseProto = reply.getResponse().get(); - return responseProto; - } catch (ExecutionException | InterruptedException e) { - throw new IOException("Failed to execute command " + request, e); - } - } - - @Override - public ContainerCommandResponseProto sendCommand( - ContainerCommandRequestProto request, List validators) - throws IOException { - try { - XceiverClientReply reply; - reply = sendCommandWithTraceIDAndRetry(request, validators); - ContainerCommandResponseProto responseProto = reply.getResponse().get(); - return responseProto; - } catch (ExecutionException | InterruptedException e) { - throw new IOException("Failed to execute command " + request, e); - } - } - - private XceiverClientReply sendCommandWithTraceIDAndRetry( - ContainerCommandRequestProto request, List validators) - throws IOException { - try (Scope scope = GlobalTracer.get() - .buildSpan("XceiverClientGrpc." + request.getCmdType().name()) - .startActive(true)) { - ContainerCommandRequestProto finalPayload = - ContainerCommandRequestProto.newBuilder(request) - .setTraceID(TracingUtil.exportCurrentSpan()).build(); - return sendCommandWithRetry(finalPayload, validators); - } - } - - private XceiverClientReply sendCommandWithRetry( - ContainerCommandRequestProto request, List validators) - throws IOException { - ContainerCommandResponseProto responseProto = null; - IOException ioException = null; - - // In case of an exception or an error, we will try to read from the - // datanodes in the pipeline in a round robin fashion. - - // TODO: cache the correct leader info in here, so that any subsequent calls - // should first go to leader - XceiverClientReply reply = new XceiverClientReply(null); - List datanodeList; - if ((request.getCmdType() == ContainerProtos.Type.ReadChunk || - request.getCmdType() == ContainerProtos.Type.GetSmallFile) && - topologyAwareRead) { - datanodeList = pipeline.getNodesInOrder(); - } else { - datanodeList = pipeline.getNodes(); - // Shuffle datanode list so that clients do not read in the same order - // every time. 
- Collections.shuffle(datanodeList); - } - for (DatanodeDetails dn : datanodeList) { - try { - if (LOG.isDebugEnabled()) { - LOG.debug("Executing command " + request + " on datanode " + dn); - } - // In case the command gets retried on a 2nd datanode, - // sendCommandAsyncCall will create a new channel and async stub - // in case these don't exist for the specific datanode. - reply.addDatanode(dn); - responseProto = sendCommandAsync(request, dn).getResponse().get(); - if (validators != null && !validators.isEmpty()) { - for (CheckedBiFunction validator : validators) { - validator.apply(request, responseProto); - } - } - break; - } catch (ExecutionException | InterruptedException | IOException e) { - LOG.error("Failed to execute command " + request + " on datanode " + dn - .getUuidString(), e); - if (!(e instanceof IOException)) { - if (Status.fromThrowable(e.getCause()).getCode() - == Status.UNAUTHENTICATED.getCode()) { - throw new SCMSecurityException("Failed to authenticate with " - + "GRPC XceiverServer with Ozone block token."); - } - ioException = new IOException(e); - } else { - ioException = (IOException) e; - } - responseProto = null; - } - } - - if (responseProto != null) { - reply.setResponse(CompletableFuture.completedFuture(responseProto)); - return reply; - } else { - Preconditions.checkNotNull(ioException); - LOG.error("Failed to execute command {} on the pipeline {}.", request, - pipeline); - throw ioException; - } - } - - // TODO: for a true async API, once the waitable future while executing - // the command on one channel fails, it should be retried asynchronously - // on the future Task for all the remaining datanodes. - - // Note: this Async api is not used currently used in any active I/O path. - // In case it gets used, the asynchronous retry logic needs to be plugged - // in here. - /** - * Sends a given command to server gets a waitable future back. - * - * @param request Request - * @return Response to the command - * @throws IOException - */ - @Override - public XceiverClientReply sendCommandAsync( - ContainerCommandRequestProto request) - throws IOException, ExecutionException, InterruptedException { - try (Scope scope = GlobalTracer.get() - .buildSpan("XceiverClientGrpc." + request.getCmdType().name()) - .startActive(true)) { - - ContainerCommandRequestProto finalPayload = - ContainerCommandRequestProto.newBuilder(request) - .setTraceID(TracingUtil.exportCurrentSpan()) - .build(); - XceiverClientReply asyncReply = - sendCommandAsync(finalPayload, pipeline.getFirstNode()); - // TODO : for now make this API sync in nature as async requests are - // served out of order over XceiverClientGrpc. This needs to be fixed - // if this API is to be used for I/O path. Currently, this is not - // used for Read/Write Operation but for tests. 
- if (!HddsUtils.isReadOnly(request)) { - asyncReply.getResponse().get(); - } - return asyncReply; - } - } - - private XceiverClientReply sendCommandAsync( - ContainerCommandRequestProto request, DatanodeDetails dn) - throws IOException, ExecutionException, InterruptedException { - if (closed) { - throw new IOException("This channel is not connected."); - } - - UUID dnId = dn.getUuid(); - ManagedChannel channel = channels.get(dnId); - // If the channel doesn't exist for this specific datanode or the channel - // is closed, just reconnect - String token = request.getEncodedToken(); - if (!isConnected(channel)) { - reconnect(dn, token); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Send command {} to datanode {}", - request.getCmdType().toString(), dn.getNetworkFullPath()); - } - final CompletableFuture replyFuture = - new CompletableFuture<>(); - semaphore.acquire(); - long requestTime = Time.monotonicNowNanos(); - metrics.incrPendingContainerOpsMetrics(request.getCmdType()); - // create a new grpc stream for each non-async call. - - // TODO: for async calls, we should reuse StreamObserver resources. - final StreamObserver requestObserver = - asyncStubs.get(dnId) - .send(new StreamObserver() { - @Override - public void onNext(ContainerCommandResponseProto value) { - replyFuture.complete(value); - metrics.decrPendingContainerOpsMetrics(request.getCmdType()); - metrics.addContainerOpsLatency(request.getCmdType(), - Time.monotonicNowNanos() - requestTime); - semaphore.release(); - } - - @Override - public void onError(Throwable t) { - replyFuture.completeExceptionally(t); - metrics.decrPendingContainerOpsMetrics(request.getCmdType()); - metrics.addContainerOpsLatency(request.getCmdType(), - Time.monotonicNowNanos() - requestTime); - semaphore.release(); - } - - @Override - public void onCompleted() { - if (!replyFuture.isDone()) { - replyFuture.completeExceptionally(new IOException( - "Stream completed but no reply for request " + request)); - } - } - }); - requestObserver.onNext(request); - requestObserver.onCompleted(); - return new XceiverClientReply(replyFuture); - } - - private void reconnect(DatanodeDetails dn, String encodedToken) - throws IOException { - ManagedChannel channel; - try { - connectToDatanode(dn, encodedToken); - channel = channels.get(dn.getUuid()); - } catch (Exception e) { - LOG.error("Error while connecting: ", e); - throw new IOException(e); - } - - if (channel == null || !isConnected(channel)) { - throw new IOException("This channel is not connected."); - } - } - - @Override - public XceiverClientReply watchForCommit(long index, long timeout) - throws InterruptedException, ExecutionException, TimeoutException, - IOException { - // there is no notion of watch for commit index in standalone pipeline - return null; - }; - - public long getReplicatedMinCommitIndex() { - return 0; - } - /** - * Returns pipeline Type. - * - * @return - Stand Alone as the type. - */ - @Override - public HddsProtos.ReplicationType getPipelineType() { - return HddsProtos.ReplicationType.STAND_ALONE; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java deleted file mode 100644 index b15828a153098..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.concurrent.Callable; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; -import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE; - -/** - * XceiverClientManager is responsible for the lifecycle of XceiverClient - * instances. Callers use this class to acquire an XceiverClient instance - * connected to the desired container pipeline. When done, the caller also uses - * this class to release the previously acquired XceiverClient instance. - * - * - * This class caches connection to container for reuse purpose, such that - * accessing same container frequently will be through the same connection - * without reestablishing connection. But the connection will be closed if - * not being used for a period of time. - */ -public class XceiverClientManager implements Closeable { - private static final Logger LOG = - LoggerFactory.getLogger(XceiverClientManager.class); - //TODO : change this to SCM configuration class - private final Configuration conf; - private final Cache clientCache; - private final boolean useRatis; - private X509Certificate caCert; - - private static XceiverClientMetrics metrics; - private boolean isSecurityEnabled; - private final boolean topologyAwareRead; - /** - * Creates a new XceiverClientManager for non secured ozone cluster. 
- * For security enabled ozone cluster, client should use the other constructor - * with a valid ca certificate in pem string format. - * - * @param conf configuration - */ - public XceiverClientManager(Configuration conf) throws IOException { - this(conf, OzoneConfiguration.of(conf).getObject(ScmClientConfig.class), - null); - } - - public XceiverClientManager(Configuration conf, ScmClientConfig clientConf, - String caCertPem) throws IOException { - Preconditions.checkNotNull(clientConf); - Preconditions.checkNotNull(conf); - long staleThresholdMs = clientConf.getStaleThreshold(MILLISECONDS); - this.useRatis = conf.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); - this.conf = conf; - this.isSecurityEnabled = OzoneSecurityUtil.isSecurityEnabled(conf); - if (isSecurityEnabled) { - Preconditions.checkNotNull(caCertPem); - try { - this.caCert = CertificateCodec.getX509Cert(caCertPem); - } catch (CertificateException ex) { - throw new SCMSecurityException("Error: Fail to get SCM CA certificate", - ex); - } - } - - this.clientCache = CacheBuilder.newBuilder() - .expireAfterAccess(staleThresholdMs, MILLISECONDS) - .maximumSize(clientConf.getMaxSize()) - .removalListener( - new RemovalListener() { - @Override - public void onRemoval( - RemovalNotification - removalNotification) { - synchronized (clientCache) { - // Mark the entry as evicted - XceiverClientSpi info = removalNotification.getValue(); - info.setEvicted(); - } - } - }).build(); - topologyAwareRead = conf.getBoolean( - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT); - } - - @VisibleForTesting - public Cache getClientCache() { - return clientCache; - } - - /** - * Acquires a XceiverClientSpi connected to a container capable of - * storing the specified key. - * - * If there is already a cached XceiverClientSpi, simply return - * the cached otherwise create a new one. - * - * @param pipeline the container pipeline for the client connection - * @return XceiverClientSpi connected to a container - * @throws IOException if a XceiverClientSpi cannot be acquired - */ - public XceiverClientSpi acquireClient(Pipeline pipeline) - throws IOException { - return acquireClient(pipeline, false); - } - - /** - * Acquires a XceiverClientSpi connected to a container for read. - * - * If there is already a cached XceiverClientSpi, simply return - * the cached otherwise create a new one. - * - * @param pipeline the container pipeline for the client connection - * @return XceiverClientSpi connected to a container - * @throws IOException if a XceiverClientSpi cannot be acquired - */ - public XceiverClientSpi acquireClientForReadData(Pipeline pipeline) - throws IOException { - return acquireClient(pipeline, true); - } - - private XceiverClientSpi acquireClient(Pipeline pipeline, boolean read) - throws IOException { - Preconditions.checkNotNull(pipeline); - Preconditions.checkArgument(pipeline.getNodes() != null); - Preconditions.checkArgument(!pipeline.getNodes().isEmpty()); - - synchronized (clientCache) { - XceiverClientSpi info = getClient(pipeline, read); - info.incrementReference(); - return info; - } - } - - /** - * Releases a XceiverClientSpi after use. 
- * - * @param client client to release - * @param invalidateClient if true, invalidates the client in cache - */ - public void releaseClient(XceiverClientSpi client, boolean invalidateClient) { - releaseClient(client, invalidateClient, false); - } - - /** - * Releases a read XceiverClientSpi after use. - * - * @param client client to release - * @param invalidateClient if true, invalidates the client in cache - */ - public void releaseClientForReadData(XceiverClientSpi client, - boolean invalidateClient) { - releaseClient(client, invalidateClient, true); - } - - private void releaseClient(XceiverClientSpi client, boolean invalidateClient, - boolean read) { - Preconditions.checkNotNull(client); - synchronized (clientCache) { - client.decrementReference(); - if (invalidateClient) { - Pipeline pipeline = client.getPipeline(); - String key = getPipelineCacheKey(pipeline, read); - XceiverClientSpi cachedClient = clientCache.getIfPresent(key); - if (cachedClient == client) { - clientCache.invalidate(key); - } - } - } - } - - private XceiverClientSpi getClient(Pipeline pipeline, boolean forRead) - throws IOException { - HddsProtos.ReplicationType type = pipeline.getType(); - try { - // create different client for read different pipeline node based on - // network topology - String key = getPipelineCacheKey(pipeline, forRead); - // Append user short name to key to prevent a different user - // from using same instance of xceiverClient. - key = isSecurityEnabled ? - key + UserGroupInformation.getCurrentUser().getShortUserName() : key; - return clientCache.get(key, new Callable() { - @Override - public XceiverClientSpi call() throws Exception { - XceiverClientSpi client = null; - switch (type) { - case RATIS: - client = XceiverClientRatis.newXceiverClientRatis(pipeline, conf, - caCert); - client.connect(); - break; - case STAND_ALONE: - client = new XceiverClientGrpc(pipeline, conf, caCert); - break; - case CHAINED: - default: - throw new IOException("not implemented" + pipeline.getType()); - } - return client; - } - }); - } catch (Exception e) { - throw new IOException( - "Exception getting XceiverClient: " + e.toString(), e); - } - } - - private String getPipelineCacheKey(Pipeline pipeline, boolean forRead) { - String key = pipeline.getId().getId().toString() + pipeline.getType(); - if (topologyAwareRead && forRead) { - try { - key += pipeline.getClosestNode().getHostName(); - } catch (IOException e) { - LOG.error("Failed to get closest node to create pipeline cache key:" + - e.getMessage()); - } - } - return key; - } - - /** - * Close and remove all the cached clients. - */ - @Override - public void close() { - //closing is done through RemovalListener - clientCache.invalidateAll(); - clientCache.cleanUp(); - - if (metrics != null) { - metrics.unRegister(); - } - } - - /** - * Tells us if Ratis is enabled for this cluster. - * @return True if Ratis is enabled. - */ - public boolean isUseRatis() { - return useRatis; - } - - /** - * Returns hard coded 3 as replication factor. - * @return 3 - */ - public HddsProtos.ReplicationFactor getFactor() { - if(isUseRatis()) { - return HddsProtos.ReplicationFactor.THREE; - } - return HddsProtos.ReplicationFactor.ONE; - } - - /** - * Returns the default replication type. - * @return Ratis or Standalone - */ - public HddsProtos.ReplicationType getType() { - // TODO : Fix me and make Ratis default before release. 
- // TODO: Remove this as replication factor and type are pipeline properties - if(isUseRatis()) { - return HddsProtos.ReplicationType.RATIS; - } - return HddsProtos.ReplicationType.STAND_ALONE; - } - - public Function byteBufferToByteStringConversion(){ - return ByteStringConversion.createByteBufferConversion(conf); - } - - /** - * Get xceiver client metric. - */ - public synchronized static XceiverClientMetrics getXceiverClientMetrics() { - if (metrics == null) { - metrics = XceiverClientMetrics.create(); - } - - return metrics; - } - - /** - * Configuration for HDDS client. - */ - @ConfigGroup(prefix = "scm.container.client") - public static class ScmClientConfig { - - private int maxSize; - private long staleThreshold; - private int maxOutstandingRequests; - - public long getStaleThreshold(TimeUnit unit) { - return unit.convert(staleThreshold, MILLISECONDS); - } - - @Config(key = "idle.threshold", - type = ConfigType.TIME, timeUnit = MILLISECONDS, - defaultValue = "10s", - tags = { OZONE, PERFORMANCE }, - description = - "In the standalone pipelines, the SCM clients use netty to " - + " communicate with the container. It also uses connection pooling" - + " to reduce client side overheads. This allows a connection to" - + " stay idle for a while before the connection is closed." - ) - public void setStaleThreshold(long staleThreshold) { - this.staleThreshold = staleThreshold; - } - - public int getMaxSize() { - return maxSize; - } - - @Config(key = "max.size", - defaultValue = "256", - tags = { OZONE, PERFORMANCE }, - description = - "Controls the maximum number of connections that are cached via" - + " client connection pooling. If the number of connections" - + " exceed this count, then the oldest idle connection is evicted." - ) - public void setMaxSize(int maxSize) { - this.maxSize = maxSize; - } - - public int getMaxOutstandingRequests() { - return maxOutstandingRequests; - } - - @Config(key = "max.outstanding.requests", - defaultValue = "100", - tags = { OZONE, PERFORMANCE }, - description = - "Controls the maximum number of outstanding async requests that can" - + " be handled by the Standalone as well as Ratis client." - ) - public void setMaxOutstandingRequests(int maxOutstandingRequests) { - this.maxOutstandingRequests = maxOutstandingRequests; - } - } - -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java deleted file mode 100644 index 5d43c5ef22585..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableRate; - -/** - * The client metrics for the Storage Container protocol. - */ -@InterfaceAudience.Private -@Metrics(about = "Storage Container Client Metrics", context = "dfs") -public class XceiverClientMetrics { - public static final String SOURCE_NAME = XceiverClientMetrics.class - .getSimpleName(); - - private @Metric MutableCounterLong pendingOps; - private @Metric MutableCounterLong totalOps; - private MutableCounterLong[] pendingOpsArray; - private MutableCounterLong[] opsArray; - private MutableRate[] containerOpsLatency; - private MetricsRegistry registry; - - public XceiverClientMetrics() { - int numEnumEntries = ContainerProtos.Type.values().length; - this.registry = new MetricsRegistry(SOURCE_NAME); - - this.pendingOpsArray = new MutableCounterLong[numEnumEntries]; - this.opsArray = new MutableCounterLong[numEnumEntries]; - this.containerOpsLatency = new MutableRate[numEnumEntries]; - for (int i = 0; i < numEnumEntries; i++) { - pendingOpsArray[i] = registry.newCounter( - "numPending" + ContainerProtos.Type.forNumber(i + 1), - "number of pending" + ContainerProtos.Type.forNumber(i + 1) + " ops", - (long) 0); - opsArray[i] = registry - .newCounter("opCount" + ContainerProtos.Type.forNumber(i + 1), - "number of" + ContainerProtos.Type.forNumber(i + 1) + " ops", - (long) 0); - - containerOpsLatency[i] = registry.newRate( - ContainerProtos.Type.forNumber(i + 1) + "Latency", - "latency of " + ContainerProtos.Type.forNumber(i + 1) - + " ops"); - } - } - - public static XceiverClientMetrics create() { - DefaultMetricsSystem.initialize(SOURCE_NAME); - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME, "Storage Container Client Metrics", - new XceiverClientMetrics()); - } - - public void incrPendingContainerOpsMetrics(ContainerProtos.Type type) { - pendingOps.incr(); - totalOps.incr(); - opsArray[type.ordinal()].incr(); - pendingOpsArray[type.ordinal()].incr(); - } - - public void decrPendingContainerOpsMetrics(ContainerProtos.Type type) { - pendingOps.incr(-1); - pendingOpsArray[type.ordinal()].incr(-1); - } - - public void addContainerOpsLatency(ContainerProtos.Type type, - long latencyNanos) { - containerOpsLatency[type.ordinal()].add(latencyNanos); - } - - public long getContainerOpsMetrics(ContainerProtos.Type type) { - return pendingOpsArray[type.ordinal()].value(); - } - - @VisibleForTesting - public long getTotalOpCount() { - return totalOps.value(); - } - - @VisibleForTesting - public long getContainerOpCountMetrics(ContainerProtos.Type type) { - return opsArray[type.ordinal()].value(); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java deleted file mode 100644 index 
04fababf50447..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import java.io.IOException; -import java.security.cert.X509Certificate; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.OptionalLong; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.ratis.RatisHelper; -import org.apache.ratis.client.RaftClient; -import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.protocol.GroupMismatchException; -import org.apache.ratis.protocol.RaftClientReply; -import org.apache.ratis.protocol.RaftException; -import org.apache.ratis.retry.RetryPolicy; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.ratis.util.TimeDuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -import io.opentracing.Scope; -import io.opentracing.util.GlobalTracer; - -/** - * An abstract implementation of {@link XceiverClientSpi} using Ratis. - * The underlying RPC mechanism can be chosen via the constructor. 
- */ -public final class XceiverClientRatis extends XceiverClientSpi { - public static final Logger LOG = - LoggerFactory.getLogger(XceiverClientRatis.class); - - public static XceiverClientRatis newXceiverClientRatis( - org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, - Configuration ozoneConf) { - return newXceiverClientRatis(pipeline, ozoneConf, null); - } - - public static XceiverClientRatis newXceiverClientRatis( - org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, - Configuration ozoneConf, X509Certificate caCert) { - final String rpcType = ozoneConf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); - final TimeDuration clientRequestTimeout = - RatisHelper.getClientRequestTimeout(ozoneConf); - final int maxOutstandingRequests = - HddsClientUtils.getMaxOutstandingRequests(ozoneConf); - final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); - final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new - SecurityConfig(ozoneConf), caCert); - return new XceiverClientRatis(pipeline, - SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests, - retryPolicy, tlsConfig, clientRequestTimeout); - } - - private final Pipeline pipeline; - private final RpcType rpcType; - private final AtomicReference client = new AtomicReference<>(); - private final int maxOutstandingRequests; - private final RetryPolicy retryPolicy; - private final GrpcTlsConfig tlsConfig; - private final TimeDuration clientRequestTimeout; - - // Map to track commit index at every server - private final ConcurrentHashMap commitInfoMap; - - private XceiverClientMetrics metrics; - - /** - * Constructs a client. - */ - private XceiverClientRatis(Pipeline pipeline, RpcType rpcType, - int maxOutStandingChunks, RetryPolicy retryPolicy, - GrpcTlsConfig tlsConfig, TimeDuration timeout) { - super(); - this.pipeline = pipeline; - this.rpcType = rpcType; - this.maxOutstandingRequests = maxOutStandingChunks; - this.retryPolicy = retryPolicy; - commitInfoMap = new ConcurrentHashMap<>(); - this.tlsConfig = tlsConfig; - this.clientRequestTimeout = timeout; - metrics = XceiverClientManager.getXceiverClientMetrics(); - } - - private void updateCommitInfosMap( - Collection commitInfoProtos) { - // if the commitInfo map is empty, just update the commit indexes for each - // of the servers - if (commitInfoMap.isEmpty()) { - commitInfoProtos.forEach(proto -> commitInfoMap - .put(RatisHelper.toDatanodeId(proto.getServer()), - proto.getCommitIndex())); - // In case the commit is happening 2 way, just update the commitIndex - // for the servers which have been successfully updating the commit - // indexes. This is important because getReplicatedMinCommitIndex() - // should always return the min commit index out of the nodes which have - // been replicating data successfully. - } else { - commitInfoProtos.forEach(proto -> commitInfoMap - .computeIfPresent(RatisHelper.toDatanodeId(proto.getServer()), - (address, index) -> { - index = proto.getCommitIndex(); - return index; - })); - } - } - - /** - * Returns Ratis as pipeline Type. 
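The commit-index bookkeeping performed by updateCommitInfosMap and consumed by getReplicatedMinCommitIndex can be reduced to the following self-contained sketch using only JDK types; the datanode UUIDs and index values are invented for illustration.

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

public final class CommitIndexTrackingExample {
  public static void main(String[] args) {
    // Per-datanode highest committed log index, as reported in Ratis replies.
    Map<UUID, Long> commitInfoMap = new ConcurrentHashMap<>();
    UUID dn1 = UUID.randomUUID();
    UUID dn2 = UUID.randomUUID();
    UUID dn3 = UUID.randomUUID();

    // The first reply seeds an entry for every server in the pipeline.
    commitInfoMap.put(dn1, 120L);
    commitInfoMap.put(dn2, 118L);
    commitInfoMap.put(dn3, 121L);

    // Later replies only touch servers still replicating successfully
    // (computeIfPresent), so a failed server removed from the map stays out.
    commitInfoMap.computeIfPresent(dn2, (id, oldIndex) -> 125L);

    // "Replicated min commit index" = minimum across all tracked servers.
    long replicatedMin = commitInfoMap.values().stream()
        .mapToLong(Long::longValue).min().orElse(0);
    System.out.println("min commit index replicated to all servers: " + replicatedMin); // 120
  }
}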
- * - * @return - Ratis - */ - @Override - public HddsProtos.ReplicationType getPipelineType() { - return HddsProtos.ReplicationType.RATIS; - } - - @Override - public Pipeline getPipeline() { - return pipeline; - } - - @Override - public void connect() throws Exception { - if (LOG.isDebugEnabled()) { - LOG.debug("Connecting to pipeline:{} datanode:{}", getPipeline().getId(), - RatisHelper.toRaftPeerId(pipeline.getFirstNode())); - } - // TODO : XceiverClient ratis should pass the config value of - // maxOutstandingRequests so as to set the upper bound on max no of async - // requests to be handled by raft client - if (!client.compareAndSet(null, - RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy, - maxOutstandingRequests, tlsConfig, clientRequestTimeout))) { - throw new IllegalStateException("Client is already connected."); - } - } - - @Override - public void connect(String encodedToken) throws Exception { - throw new UnsupportedOperationException("Block tokens are not " + - "implemented for Ratis clients."); - } - - @Override - public void close() { - final RaftClient c = client.getAndSet(null); - if (c != null) { - closeRaftClient(c); - } - } - - private void closeRaftClient(RaftClient raftClient) { - try { - raftClient.close(); - } catch (IOException e) { - throw new IllegalStateException(e); - } - } - - private RaftClient getClient() { - return Objects.requireNonNull(client.get(), "client is null"); - } - - - @VisibleForTesting - public ConcurrentHashMap getCommitInfoMap() { - return commitInfoMap; - } - - private CompletableFuture sendRequestAsync( - ContainerCommandRequestProto request) { - try (Scope scope = GlobalTracer.get() - .buildSpan("XceiverClientRatis." + request.getCmdType().name()) - .startActive(true)) { - final ContainerCommandRequestMessage message - = ContainerCommandRequestMessage.toMessage( - request, TracingUtil.exportCurrentSpan()); - if (HddsUtils.isReadOnly(request)) { - if (LOG.isDebugEnabled()) { - LOG.debug("sendCommandAsync ReadOnly {}", message); - } - return getClient().sendReadOnlyAsync(message); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("sendCommandAsync {}", message); - } - return getClient().sendAsync(message); - } - } - } - - // gets the minimum log index replicated to all servers - @Override - public long getReplicatedMinCommitIndex() { - OptionalLong minIndex = - commitInfoMap.values().parallelStream().mapToLong(v -> v).min(); - return minIndex.isPresent() ? 
minIndex.getAsLong() : 0; - } - - private void addDatanodetoReply(UUID address, XceiverClientReply reply) { - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(address.toString()); - reply.addDatanode(builder.build()); - } - - @Override - public XceiverClientReply watchForCommit(long index, long timeout) - throws InterruptedException, ExecutionException, TimeoutException, - IOException { - long commitIndex = getReplicatedMinCommitIndex(); - XceiverClientReply clientReply = new XceiverClientReply(null); - if (commitIndex >= index) { - // return the min commit index till which the log has been replicated to - // all servers - clientReply.setLogIndex(commitIndex); - return clientReply; - } - if (LOG.isDebugEnabled()) { - LOG.debug("commit index : {} watch timeout : {}", index, timeout); - } - RaftClientReply reply; - try { - CompletableFuture replyFuture = getClient() - .sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED); - replyFuture.get(timeout, TimeUnit.MILLISECONDS); - } catch (Exception e) { - Throwable t = HddsClientUtils.checkForException(e); - LOG.warn("3 way commit failed on pipeline {}", pipeline, e); - if (t instanceof GroupMismatchException) { - throw e; - } - reply = getClient() - .sendWatchAsync(index, RaftProtos.ReplicationLevel.MAJORITY_COMMITTED) - .get(timeout, TimeUnit.MILLISECONDS); - List commitInfoProtoList = - reply.getCommitInfos().stream() - .filter(i -> i.getCommitIndex() < index) - .collect(Collectors.toList()); - commitInfoProtoList.parallelStream().forEach(proto -> { - UUID address = RatisHelper.toDatanodeId(proto.getServer()); - addDatanodetoReply(address, clientReply); - // since 3 way commit has failed, the updated map from now on will - // only store entries for those datanodes which have had successful - // replication. - commitInfoMap.remove(address); - LOG.info( - "Could not commit index {} on pipeline {} to all the nodes. " + - "Server {} has failed. Committed by majority.", - index, pipeline, address); - }); - } - clientReply.setLogIndex(index); - return clientReply; - } - - /** - * Sends a given command to server gets a waitable future back. - * - * @param request Request - * @return Response to the command - */ - @Override - public XceiverClientReply sendCommandAsync( - ContainerCommandRequestProto request) { - XceiverClientReply asyncReply = new XceiverClientReply(null); - long requestTime = Time.monotonicNowNanos(); - CompletableFuture raftClientReply = - sendRequestAsync(request); - metrics.incrPendingContainerOpsMetrics(request.getCmdType()); - CompletableFuture containerCommandResponse = - raftClientReply.whenComplete((reply, e) -> { - if (LOG.isDebugEnabled()) { - LOG.debug("received reply {} for request: cmdType={} containerID={}" - + " pipelineID={} traceID={} exception: {}", reply, - request.getCmdType(), request.getContainerID(), - request.getPipelineID(), request.getTraceID(), e); - } - metrics.decrPendingContainerOpsMetrics(request.getCmdType()); - metrics.addContainerOpsLatency(request.getCmdType(), - Time.monotonicNowNanos() - requestTime); - }).thenApply(reply -> { - try { - if (!reply.isSuccess()) { - // in case of raft retry failure, the raft client is - // not able to connect to the leader hence the pipeline - // can not be used but this instance of RaftClient will close - // and refreshed again. In case the client cannot connect to - // leader, getClient call will fail. - - // No need to set the failed Server ID here. 
Ozone client - // will directly exclude this pipeline in next allocate block - // to SCM as in this case, it is the raft client which is not - // able to connect to leader in the pipeline, though the - // pipeline can still be functional. - RaftException exception = reply.getException(); - Preconditions.checkNotNull(exception, "Raft reply failure but " + - "no exception propagated."); - throw new CompletionException(exception); - } - ContainerCommandResponseProto response = - ContainerCommandResponseProto - .parseFrom(reply.getMessage().getContent()); - UUID serverId = RatisHelper.toDatanodeId(reply.getReplierId()); - if (response.getResult() == ContainerProtos.Result.SUCCESS) { - updateCommitInfosMap(reply.getCommitInfos()); - } - asyncReply.setLogIndex(reply.getLogIndex()); - addDatanodetoReply(serverId, asyncReply); - return response; - } catch (InvalidProtocolBufferException e) { - throw new CompletionException(e); - } - }); - asyncReply.setResponse(containerCommandResponse); - return asyncReply; - } - -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java deleted file mode 100644 index 982fb8ea1eec6..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java +++ /dev/null @@ -1,495 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.client; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadContainerResponseProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; - -/** - * This class provides the client-facing APIs of container operations. 
- */ -public class ContainerOperationClient implements ScmClient { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerOperationClient.class); - private static long containerSizeB = -1; - private final StorageContainerLocationProtocol - storageContainerLocationClient; - private final XceiverClientManager xceiverClientManager; - - public ContainerOperationClient( - StorageContainerLocationProtocol - storageContainerLocationClient, - XceiverClientManager xceiverClientManager) { - this.storageContainerLocationClient = storageContainerLocationClient; - this.xceiverClientManager = xceiverClientManager; - } - - /** - * Return the capacity of containers. The current assumption is that all - * containers have the same capacity. Therefore one static is sufficient for - * any container. - * @return The capacity of one container in number of bytes. - */ - public static long getContainerSizeB() { - return containerSizeB; - } - - /** - * Set the capacity of container. Should be exactly once on system start. - * @param size Capacity of one container in number of bytes. - */ - public static void setContainerSizeB(long size) { - containerSizeB = size; - } - - - @Override - public ContainerWithPipeline createContainer(String owner) - throws IOException { - XceiverClientSpi client = null; - try { - ContainerWithPipeline containerWithPipeline = - storageContainerLocationClient.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), owner); - Pipeline pipeline = containerWithPipeline.getPipeline(); - client = xceiverClientManager.acquireClient(pipeline); - - Preconditions.checkState(pipeline.isOpen(), String - .format("Unexpected state=%s for pipeline=%s, expected state=%s", - pipeline.getPipelineState(), pipeline.getId(), - Pipeline.PipelineState.OPEN)); - createContainer(client, - containerWithPipeline.getContainerInfo().getContainerID()); - return containerWithPipeline; - } finally { - if (client != null) { - xceiverClientManager.releaseClient(client, false); - } - } - } - - /** - * Create a container over pipeline specified by the SCM. - * - * @param client - Client to communicate with Datanodes. - * @param containerId - Container ID. - * @throws IOException - */ - public void createContainer(XceiverClientSpi client, - long containerId) throws IOException { - ContainerProtocolCalls.createContainer(client, containerId, null); - - // Let us log this info after we let SCM know that we have completed the - // creation state. - if (LOG.isDebugEnabled()) { - LOG.debug("Created container " + containerId - + " machines:" + client.getPipeline().getNodes()); - } - } - - /** - * Creates a pipeline over the machines choosen by the SCM. - * - * @param client - Client - * @param pipeline - pipeline to be createdon Datanodes. - * @throws IOException - */ - private void createPipeline(XceiverClientSpi client, Pipeline pipeline) - throws IOException { - - Preconditions.checkNotNull(pipeline.getId(), "Pipeline " + - "name cannot be null when client create flag is set."); - - // Pipeline creation is a three step process. - // - // 1. Notify SCM that this client is doing a create pipeline on - // datanodes. - // - // 2. Talk to Datanodes to create the pipeline. - // - // 3. update SCM that pipeline creation was successful. 
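From the caller's side, the allocate-create-close flow above amounts to the sketch below. It is written against the ScmClient interface that the removed ContainerOperationClient implements; the owner string and replication settings are illustrative, error handling is trimmed, and it only compiles where these HDDS client classes remain on the classpath.

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

public final class ContainerLifecycleExample {
  // Allocate a RATIS/THREE container via SCM, then ask SCM and the datanodes to close it.
  static long createAndClose(ScmClient scm) throws IOException {
    ContainerWithPipeline cwp = scm.createContainer(
        HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE,
        "example-owner");                                  // owner string is illustrative
    long containerID = cwp.getContainerInfo().getContainerID();
    scm.closeContainer(containerID);                       // notify SCM, close on datanodes
    return containerID;
  }
}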
- - // TODO: this has not been fully implemented on server side - // SCMClientProtocolServer#notifyObjectStageChange - // TODO: when implement the pipeline state machine, change - // the pipeline name (string) to pipeline id (long) - //storageContainerLocationClient.notifyObjectStageChange( - // ObjectStageChangeRequestProto.Type.pipeline, - // pipeline.getPipelineName(), - // ObjectStageChangeRequestProto.Op.create, - // ObjectStageChangeRequestProto.Stage.begin); - - // client.createPipeline(); - // TODO: Use PipelineManager to createPipeline - - //storageContainerLocationClient.notifyObjectStageChange( - // ObjectStageChangeRequestProto.Type.pipeline, - // pipeline.getPipelineName(), - // ObjectStageChangeRequestProto.Op.create, - // ObjectStageChangeRequestProto.Stage.complete); - - // TODO : Should we change the state on the client side ?? - // That makes sense, but it is not needed for the client to work. - if (LOG.isDebugEnabled()) { - LOG.debug("Pipeline creation successful. Pipeline: {}", - pipeline.toString()); - } - } - - @Override - public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, String owner) throws IOException { - XceiverClientSpi client = null; - try { - // allocate container on SCM. - ContainerWithPipeline containerWithPipeline = - storageContainerLocationClient.allocateContainer(type, factor, - owner); - Pipeline pipeline = containerWithPipeline.getPipeline(); - client = xceiverClientManager.acquireClient(pipeline); - - // connect to pipeline leader and allocate container on leader datanode. - client = xceiverClientManager.acquireClient(pipeline); - createContainer(client, - containerWithPipeline.getContainerInfo().getContainerID()); - return containerWithPipeline; - } finally { - if (client != null) { - xceiverClientManager.releaseClient(client, false); - } - } - } - - /** - * Returns a set of Nodes that meet a query criteria. - * - * @param nodeStatuses - Criteria that we want the node to have. - * @param queryScope - Query scope - Cluster or pool. - * @param poolName - if it is pool, a pool name is required. - * @return A set of nodes that meet the requested criteria. - * @throws IOException - */ - @Override - public List queryNode(HddsProtos.NodeState - nodeStatuses, HddsProtos.QueryScope queryScope, String poolName) - throws IOException { - return storageContainerLocationClient.queryNode(nodeStatuses, queryScope, - poolName); - } - - /** - * Creates a specified replication pipeline. 
- */ - @Override - public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) - throws IOException { - return storageContainerLocationClient.createReplicationPipeline(type, - factor, nodePool); - } - - @Override - public List listPipelines() throws IOException { - return storageContainerLocationClient.listPipelines(); - } - - @Override - public void activatePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - storageContainerLocationClient.activatePipeline(pipelineID); - } - - @Override - public void deactivatePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - storageContainerLocationClient.deactivatePipeline(pipelineID); - } - - @Override - public void closePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - storageContainerLocationClient.closePipeline(pipelineID); - } - - @Override - public void close() { - try { - xceiverClientManager.close(); - } catch (Exception ex) { - LOG.error("Can't close " + this.getClass().getSimpleName(), ex); - } - } - - /** - * Deletes an existing container. - * - * @param containerId - ID of the container. - * @param pipeline - Pipeline that represents the container. - * @param force - true to forcibly delete the container. - * @throws IOException - */ - @Override - public void deleteContainer(long containerId, Pipeline pipeline, - boolean force) throws IOException { - XceiverClientSpi client = null; - try { - client = xceiverClientManager.acquireClient(pipeline); - ContainerProtocolCalls - .deleteContainer(client, containerId, force, null); - storageContainerLocationClient - .deleteContainer(containerId); - if (LOG.isDebugEnabled()) { - LOG.debug("Deleted container {}, machines: {} ", containerId, - pipeline.getNodes()); - } - } finally { - if (client != null) { - xceiverClientManager.releaseClient(client, false); - } - } - } - - /** - * Delete the container, this will release any resource it uses. - * @param containerID - containerID. - * @param force - True to forcibly delete the container. - * @throws IOException - */ - @Override - public void deleteContainer(long containerID, boolean force) - throws IOException { - ContainerWithPipeline info = getContainerWithPipeline(containerID); - deleteContainer(containerID, info.getPipeline(), force); - } - - @Override - public List listContainer(long startContainerID, - int count) throws IOException { - return storageContainerLocationClient.listContainer( - startContainerID, count); - } - - /** - * Get meta data from an existing container. - * - * @param containerID - ID of the container. - * @param pipeline - Pipeline where the container is located. - * @return ContainerInfo - * @throws IOException - */ - @Override - public ContainerDataProto readContainer(long containerID, - Pipeline pipeline) throws IOException { - XceiverClientSpi client = null; - try { - client = xceiverClientManager.acquireClient(pipeline); - ReadContainerResponseProto response = - ContainerProtocolCalls.readContainer(client, containerID, null); - if (LOG.isDebugEnabled()) { - LOG.debug("Read container {}, machines: {} ", containerID, - pipeline.getNodes()); - } - return response.getContainerData(); - } finally { - if (client != null) { - xceiverClientManager.releaseClient(client, false); - } - } - } - - /** - * Get meta data from an existing container. - * @param containerID - ID of the container. - * @return ContainerInfo - a message of protobuf which has basic info - * of a container. 
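Listing and deleting containers through the same removed ScmClient surface looks roughly as follows; the start ID, batch size, and force flag are illustrative and error handling is trimmed.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;

public final class ContainerListingExample {
  static void deleteFirstBatch(ScmClient scm) throws IOException {
    // Fetch at most 10 containers starting from container ID 0.
    List<ContainerInfo> batch = scm.listContainer(0L, 10);
    for (ContainerInfo info : batch) {
      scm.deleteContainer(info.getContainerID(), false);   // non-forced delete
    }
  }
}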
- * @throws IOException - */ - @Override - public ContainerDataProto readContainer(long containerID) throws IOException { - ContainerWithPipeline info = getContainerWithPipeline(containerID); - return readContainer(containerID, info.getPipeline()); - } - - /** - * Given an id, return the pipeline associated with the container. - * @param containerId - String Container ID - * @return Pipeline of the existing container, corresponding to the given id. - * @throws IOException - */ - @Override - public ContainerInfo getContainer(long containerId) throws - IOException { - return storageContainerLocationClient.getContainer(containerId); - } - - /** - * Gets a container by Name -- Throws if the container does not exist. - * - * @param containerId - Container ID - * @return ContainerWithPipeline - * @throws IOException - */ - @Override - public ContainerWithPipeline getContainerWithPipeline(long containerId) - throws IOException { - return storageContainerLocationClient.getContainerWithPipeline(containerId); - } - - /** - * Close a container. - * - * @param pipeline the container to be closed. - * @throws IOException - */ - @Override - public void closeContainer(long containerId, Pipeline pipeline) - throws IOException { - XceiverClientSpi client = null; - try { - if (LOG.isDebugEnabled()) { - LOG.debug("Close container {}", pipeline); - } - /* - TODO: two orders here, revisit this later: - 1. close on SCM first, then on data node - 2. close on data node first, then on SCM - - with 1: if client failed after closing on SCM, then there is a - container SCM thinks as closed, but is actually open. Then SCM will no - longer allocate block to it, which is fine. But SCM may later try to - replicate this "closed" container, which I'm not sure is safe. - - with 2: if client failed after close on datanode, then there is a - container SCM thinks as open, but is actually closed. Then SCM will still - try to allocate block to it. Which will fail when actually doing the - write. No more data can be written, but at least the correctness and - consistency of existing data will maintain. - - For now, take the #2 way. - */ - // Actually close the container on Datanode - client = xceiverClientManager.acquireClient(pipeline); - - storageContainerLocationClient.notifyObjectStageChange( - ObjectStageChangeRequestProto.Type.container, - containerId, - ObjectStageChangeRequestProto.Op.close, - ObjectStageChangeRequestProto.Stage.begin); - - ContainerProtocolCalls.closeContainer(client, containerId, - null); - // Notify SCM to close the container - storageContainerLocationClient.notifyObjectStageChange( - ObjectStageChangeRequestProto.Type.container, - containerId, - ObjectStageChangeRequestProto.Op.close, - ObjectStageChangeRequestProto.Stage.complete); - } finally { - if (client != null) { - xceiverClientManager.releaseClient(client, false); - } - } - } - - /** - * Close a container. - * - * @throws IOException - */ - @Override - public void closeContainer(long containerId) - throws IOException { - ContainerWithPipeline info = getContainerWithPipeline(containerId); - Pipeline pipeline = info.getPipeline(); - closeContainer(containerId, pipeline); - } - - /** - * Get the the current usage information. - * @param containerID - ID of the container. - * @return the size of the given container. - * @throws IOException - */ - @Override - public long getContainerSize(long containerID) throws IOException { - // TODO : Fix this, it currently returns the capacity - // but not the current usage. 
- long size = getContainerSizeB(); - if (size == -1) { - throw new IOException("Container size unknown!"); - } - return size; - } - - /** - * Check if SCM is in safe mode. - * - * @return Returns true if SCM is in safe mode else returns false. - * @throws IOException - */ - public boolean inSafeMode() throws IOException { - return storageContainerLocationClient.inSafeMode(); - } - - /** - * Force SCM out of safe mode. - * - * @return returns true if operation is successful. - * @throws IOException - */ - public boolean forceExitSafeMode() throws IOException { - return storageContainerLocationClient.forceExitSafeMode(); - } - - @Override - public void startReplicationManager() throws IOException { - storageContainerLocationClient.startReplicationManager(); - } - - @Override - public void stopReplicationManager() throws IOException { - storageContainerLocationClient.stopReplicationManager(); - } - - @Override - public boolean getReplicationManagerStatus() throws IOException { - return storageContainerLocationClient.getReplicationManagerStatus(); - } - - -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java deleted file mode 100644 index d3bb31aa69878..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java +++ /dev/null @@ -1,350 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.client; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB; -import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClients; -import org.apache.ratis.protocol.AlreadyClosedException; -import org.apache.ratis.protocol.GroupMismatchException; -import org.apache.ratis.protocol.NotReplicatedException; -import org.apache.ratis.protocol.RaftRetryFailureException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.text.ParseException; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Utility methods for Ozone and Container Clients. - * - * The methods to retrieve SCM service endpoints assume there is a single - * SCM service instance. This will change when we switch to replicated service - * instances for redundancy. - */ -@InterfaceAudience.Public -@InterfaceStability.Unstable -public final class HddsClientUtils { - - private static final Logger LOG = LoggerFactory.getLogger( - HddsClientUtils.class); - - private static final int NO_PORT = -1; - - private HddsClientUtils() { - } - - private static final List> EXCEPTION_LIST = - new ArrayList>() {{ - add(TimeoutException.class); - add(StorageContainerException.class); - add(RaftRetryFailureException.class); - add(AlreadyClosedException.class); - add(GroupMismatchException.class); - // Not Replicated Exception will be thrown if watch For commit - // does not succeed - add(NotReplicatedException.class); - }}; - - /** - * Date format that used in ozone. Here the format is thread safe to use. 
- */ - private static final ThreadLocal DATE_FORMAT = - ThreadLocal.withInitial(() -> { - DateTimeFormatter format = - DateTimeFormatter.ofPattern(OzoneConsts.OZONE_DATE_FORMAT); - return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE)); - }); - - - /** - * Convert time in millisecond to a human readable format required in ozone. - * @return a human readable string for the input time - */ - public static String formatDateTime(long millis) { - ZonedDateTime dateTime = ZonedDateTime.ofInstant( - Instant.ofEpochMilli(millis), DATE_FORMAT.get().getZone()); - return DATE_FORMAT.get().format(dateTime); - } - - /** - * Convert time in ozone date format to millisecond. - * @return time in milliseconds - */ - public static long formatDateTime(String date) throws ParseException { - Preconditions.checkNotNull(date, "Date string should not be null."); - return ZonedDateTime.parse(date, DATE_FORMAT.get()) - .toInstant().toEpochMilli(); - } - - /** - * verifies that bucket name / volume name is a valid DNS name. - * - * @param resName Bucket or volume Name to be validated - * - * @throws IllegalArgumentException - */ - public static void verifyResourceName(String resName) - throws IllegalArgumentException { - if (resName == null) { - throw new IllegalArgumentException("Bucket or Volume name is null"); - } - - if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH || - resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) { - throw new IllegalArgumentException( - "Bucket or Volume length is illegal, " - + "valid length is 3-63 characters"); - } - - if (resName.charAt(0) == '.' || resName.charAt(0) == '-') { - throw new IllegalArgumentException( - "Bucket or Volume name cannot start with a period or dash"); - } - - if (resName.charAt(resName.length() - 1) == '.' || - resName.charAt(resName.length() - 1) == '-') { - throw new IllegalArgumentException("Bucket or Volume name " - + "cannot end with a period or dash"); - } - - boolean isIPv4 = true; - char prev = (char) 0; - - for (int index = 0; index < resName.length(); index++) { - char currChar = resName.charAt(index); - if (currChar != '.') { - isIPv4 = ((currChar >= '0') && (currChar <= '9')) && isIPv4; - } - if (currChar > 'A' && currChar < 'Z') { - throw new IllegalArgumentException( - "Bucket or Volume name does not support uppercase characters"); - } - if (currChar != '.' && currChar != '-') { - if (currChar < '0' || (currChar > '9' && currChar < 'a') || - currChar > 'z') { - throw new IllegalArgumentException("Bucket or Volume name has an " + - "unsupported character : " + - currChar); - } - } - if (prev == '.' && currChar == '.') { - throw new IllegalArgumentException("Bucket or Volume name should not " + - "have two contiguous periods"); - } - if (prev == '-' && currChar == '.') { - throw new IllegalArgumentException( - "Bucket or Volume name should not have period after dash"); - } - if (prev == '.' && currChar == '-') { - throw new IllegalArgumentException( - "Bucket or Volume name should not have dash after period"); - } - prev = currChar; - } - - if (isIPv4) { - throw new IllegalArgumentException( - "Bucket or Volume name cannot be an IPv4 address or all numeric"); - } - } - - /** - * verifies that bucket / volume name is a valid DNS name. - * - * @param resourceNames Array of bucket / volume names to be verified. - */ - public static void verifyResourceName(String... 
resourceNames) { - for (String resourceName : resourceNames) { - HddsClientUtils.verifyResourceName(resourceName); - } - } - - /** - * Checks that object parameters passed as reference is not null. - * - * @param references Array of object references to be checked. - * @param - */ - public static void checkNotNull(T... references) { - for (T ref: references) { - Preconditions.checkNotNull(ref); - } - } - - /** - * Returns the cache value to be used for list calls. - * @param conf Configuration object - * @return list cache size - */ - public static int getListCacheSize(Configuration conf) { - return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE, - OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT); - } - - /** - * @return a default instance of {@link CloseableHttpClient}. - */ - public static CloseableHttpClient newHttpClient() { - return HddsClientUtils.newHttpClient(new Configuration()); - } - - /** - * Returns a {@link CloseableHttpClient} configured by given configuration. - * If conf is null, returns a default instance. - * - * @param conf configuration - * @return a {@link CloseableHttpClient} instance. - */ - public static CloseableHttpClient newHttpClient(Configuration conf) { - long socketTimeout = OzoneConfigKeys - .OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT; - long connectionTimeout = OzoneConfigKeys - .OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT; - if (conf != null) { - socketTimeout = conf.getTimeDuration( - OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - connectionTimeout = conf.getTimeDuration( - OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - } - - CloseableHttpClient client = HttpClients.custom() - .setDefaultRequestConfig( - RequestConfig.custom() - .setSocketTimeout(Math.toIntExact(socketTimeout)) - .setConnectTimeout(Math.toIntExact(connectionTimeout)) - .build()) - .build(); - return client; - } - - /** - * Returns the maximum no of outstanding async requests to be handled by - * Standalone and Ratis client. - */ - public static int getMaxOutstandingRequests(Configuration config) { - return OzoneConfiguration.of(config) - .getObject(ScmClientConfig.class) - .getMaxOutstandingRequests(); - } - - /** - * Create a scm block client, used by putKey() and getKey(). 
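The character-by-character checks above boil down to a small set of DNS-style naming rules; a compact, stand-alone restatement follows (illustration only — the removed utility throws IllegalArgumentException with specific messages rather than returning a boolean).

public final class ResourceNameRulesExample {
  // 3-63 chars, lowercase letters/digits/'.'/'-', no leading or trailing '.'/'-',
  // no "..", ".-" or "-.", and not a purely numeric / IPv4-style name.
  static boolean isValid(String name) {
    if (name == null || name.length() < 3 || name.length() > 63) {
      return false;
    }
    if (!name.matches("[a-z0-9][a-z0-9.-]*[a-z0-9]")) {
      return false;                                   // charset and edge characters
    }
    if (name.contains("..") || name.contains(".-") || name.contains("-.")) {
      return false;                                   // no adjacent separators
    }
    return !name.matches("[0-9.]+");                  // reject IPv4-like / all-numeric names
  }

  public static void main(String[] args) {
    System.out.println(isValid("my-bucket.logs"));    // true
    System.out.println(isValid("My_Bucket"));         // false: uppercase and underscore
    System.out.println(isValid("10.0.0.1"));          // false: IPv4-style
  }
}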
- * - * @return {@link ScmBlockLocationProtocol} - * @throws IOException - */ - public static SCMSecurityProtocol getScmSecurityClient( - OzoneConfiguration conf, UserGroupInformation ugi) throws IOException { - RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, - ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class); - InetSocketAddress scmSecurityProtoAdd = - HddsUtils.getScmAddressForSecurityProtocol(conf); - SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient = - new SCMSecurityProtocolClientSideTranslatorPB( - RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion, - scmSecurityProtoAdd, ugi, conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))); - return scmSecurityClient; - } - - public static Throwable checkForException(Exception e) { - Throwable t = e; - while (t != null) { - for (Class cls : getExceptionList()) { - if (cls.isInstance(t)) { - return t; - } - } - t = t.getCause(); - } - return t; - } - - public static RetryPolicy createRetryPolicy(int maxRetryCount, - long retryInterval) { - // retry with fixed sleep between retries - return RetryPolicies.retryUpToMaximumCountWithFixedSleep( - maxRetryCount, retryInterval, TimeUnit.MILLISECONDS); - } - - public static Map, - RetryPolicy> getRetryPolicyByException(int maxRetryCount, - long retryInterval) { - Map, RetryPolicy> policyMap = new HashMap<>(); - for (Class ex : EXCEPTION_LIST) { - if (ex == TimeoutException.class - || ex == RaftRetryFailureException.class) { - // retry without sleep - policyMap.put(ex, createRetryPolicy(maxRetryCount, 0)); - } else { - // retry with fixed sleep between retries - policyMap.put(ex, createRetryPolicy(maxRetryCount, retryInterval)); - } - } - // Default retry policy - policyMap - .put(Exception.class, createRetryPolicy(maxRetryCount, retryInterval)); - return policyMap; - } - - public static List> getExceptionList() { - return EXCEPTION_LIST; - } -} \ No newline at end of file diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java deleted file mode 100644 index 73ad78cd7872c..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.client; - -/** - * Client facing classes for the container operations. 
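The per-exception retry scheme built by getRetryPolicyByException above (retry immediately for watch timeouts and Raft retry failures, back off for everything else) can be sketched with Hadoop's stock RetryPolicies. Only TimeoutException is mapped here so the snippet stays free of Ratis classes; the removed code also maps RaftRetryFailureException, AlreadyClosedException, GroupMismatchException and NotReplicatedException.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public final class RetryPolicyMapExample {
  static RetryPolicy build(int maxRetryCount, long retryIntervalMs) {
    RetryPolicy withSleep = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        maxRetryCount, retryIntervalMs, TimeUnit.MILLISECONDS);
    RetryPolicy noSleep = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        maxRetryCount, 0, TimeUnit.MILLISECONDS);

    Map<Class<? extends Exception>, RetryPolicy> byException = new HashMap<>();
    byException.put(TimeoutException.class, noSleep);   // retry watch timeouts without sleeping
    byException.put(Exception.class, withSleep);        // default: fixed sleep between retries

    // Dispatch on exception type, falling back to the fixed-sleep policy.
    return RetryPolicies.retryByException(withSleep, byException);
  }
}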
- */ \ No newline at end of file diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index 9390bc102034c..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -/** - * Classes for different type of container service client. - */ \ No newline at end of file diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java deleted file mode 100644 index 40bbd93b16f1e..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.storage; - -import com.google.common.annotations.VisibleForTesting; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.fs.Seekable; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * An {@link InputStream} called from KeyInputStream to read a block from the - * container. - * This class encapsulates all state management for iterating - * through the sequence of chunks through {@link ChunkInputStream}. - */ -public class BlockInputStream extends InputStream implements Seekable { - - private static final Logger LOG = - LoggerFactory.getLogger(BlockInputStream.class); - - private static final int EOF = -1; - - private final BlockID blockID; - private final long length; - private Pipeline pipeline; - private final Token token; - private final boolean verifyChecksum; - private XceiverClientManager xceiverClientManager; - private XceiverClientSpi xceiverClient; - private boolean initialized = false; - - // List of ChunkInputStreams, one for each chunk in the block - private List chunkStreams; - - // chunkOffsets[i] stores the index of the first data byte in - // chunkStream i w.r.t the block data. - // Let’s say we have chunk size as 40 bytes. And let's say the parent - // block stores data from index 200 and has length 400. - // The first 40 bytes of this block will be stored in chunk[0], next 40 in - // chunk[1] and so on. But since the chunkOffsets are w.r.t the block only - // and not the key, the values in chunkOffsets will be [0, 40, 80,....]. - private long[] chunkOffsets = null; - - // Index of the chunkStream corresponding to the current position of the - // BlockInputStream i.e offset of the data to be read next from this block - private int chunkIndex; - - // Position of the BlockInputStream is maintainted by this variable till - // the stream is initialized. This position is w.r.t to the block only and - // not the key. - // For the above example, if we seek to position 240 before the stream is - // initialized, then value of blockPosition will be set to 40. - // Once, the stream is initialized, the position of the stream - // will be determined by the current chunkStream and its position. - private long blockPosition = 0; - - // Tracks the chunkIndex corresponding to the last blockPosition so that it - // can be reset if a new position is seeked. 
- private int chunkIndexOfPrevPosition; - - public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, - Token token, boolean verifyChecksum, - XceiverClientManager xceiverClientManager) { - this.blockID = blockId; - this.length = blockLen; - this.pipeline = pipeline; - this.token = token; - this.verifyChecksum = verifyChecksum; - this.xceiverClientManager = xceiverClientManager; - } - - /** - * Initialize the BlockInputStream. Get the BlockData (list of chunks) from - * the Container and create the ChunkInputStreams for each Chunk in the Block. - */ - public synchronized void initialize() throws IOException { - - // Pre-check that the stream has not been intialized already - if (initialized) { - return; - } - - List chunks = getChunkInfos(); - if (chunks != null && !chunks.isEmpty()) { - // For each chunk in the block, create a ChunkInputStream and compute - // its chunkOffset - this.chunkOffsets = new long[chunks.size()]; - long tempOffset = 0; - - this.chunkStreams = new ArrayList<>(chunks.size()); - for (int i = 0; i < chunks.size(); i++) { - addStream(chunks.get(i)); - chunkOffsets[i] = tempOffset; - tempOffset += chunks.get(i).getLen(); - } - - initialized = true; - this.chunkIndex = 0; - - if (blockPosition > 0) { - // Stream was seeked to blockPosition before initialization. Seek to the - // blockPosition now. - seek(blockPosition); - } - } - } - - /** - * Send RPC call to get the block info from the container. - * @return List of chunks in this block. - */ - protected List getChunkInfos() throws IOException { - // irrespective of the container state, we will always read via Standalone - // protocol. - if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) { - pipeline = Pipeline.newBuilder(pipeline) - .setType(HddsProtos.ReplicationType.STAND_ALONE).build(); - } - xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline); - boolean success = false; - List chunks; - try { - if (LOG.isDebugEnabled()) { - LOG.debug("Initializing BlockInputStream for get key to access {}", - blockID.getContainerID()); - } - - if (token != null) { - UserGroupInformation.getCurrentUser().addToken(token); - } - DatanodeBlockID datanodeBlockID = blockID - .getDatanodeBlockIDProtobuf(); - GetBlockResponseProto response = ContainerProtocolCalls - .getBlock(xceiverClient, datanodeBlockID); - - chunks = response.getBlockData().getChunksList(); - success = true; - } finally { - if (!success) { - xceiverClientManager.releaseClientForReadData(xceiverClient, false); - } - } - - return chunks; - } - - /** - * Append another ChunkInputStream to the end of the list. Note that the - * ChunkInputStream is only created here. The chunk will be read from the - * Datanode only when a read operation is performed on for that chunk. 
- */ - protected synchronized void addStream(ChunkInfo chunkInfo) { - chunkStreams.add(new ChunkInputStream(chunkInfo, blockID, - xceiverClient, verifyChecksum)); - } - - public synchronized long getRemaining() throws IOException { - return length - getPos(); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized int read() throws IOException { - byte[] buf = new byte[1]; - if (read(buf, 0, 1) == EOF) { - return EOF; - } - return Byte.toUnsignedInt(buf[0]); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized int read(byte[] b, int off, int len) throws IOException { - if (b == null) { - throw new NullPointerException(); - } - if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return 0; - } - - if (!initialized) { - initialize(); - } - - checkOpen(); - int totalReadLen = 0; - while (len > 0) { - // if we are at the last chunk and have read the entire chunk, return - if (chunkStreams.size() == 0 || - (chunkStreams.size() - 1 <= chunkIndex && - chunkStreams.get(chunkIndex) - .getRemaining() == 0)) { - return totalReadLen == 0 ? EOF : totalReadLen; - } - - // Get the current chunkStream and read data from it - ChunkInputStream current = chunkStreams.get(chunkIndex); - int numBytesToRead = Math.min(len, (int)current.getRemaining()); - int numBytesRead = current.read(b, off, numBytesToRead); - if (numBytesRead != numBytesToRead) { - // This implies that there is either data loss or corruption in the - // chunk entries. Even EOF in the current stream would be covered in - // this case. - throw new IOException(String.format( - "Inconsistent read for chunkName=%s length=%d numBytesRead=%d", - current.getChunkName(), current.getLength(), numBytesRead)); - } - totalReadLen += numBytesRead; - off += numBytesRead; - len -= numBytesRead; - if (current.getRemaining() <= 0 && - ((chunkIndex + 1) < chunkStreams.size())) { - chunkIndex += 1; - } - } - return totalReadLen; - } - - /** - * Seeks the BlockInputStream to the specified position. If the stream is - * not initialized, save the seeked position via blockPosition. Otherwise, - * update the position in 2 steps: - * 1. Updating the chunkIndex to the chunkStream corresponding to the - * seeked position. - * 2. Seek the corresponding chunkStream to the adjusted position. - * - * Let’s say we have chunk size as 40 bytes. And let's say the parent block - * stores data from index 200 and has length 400. If the key was seeked to - * position 90, then this block will be seeked to position 90. - * When seek(90) is called on this blockStream, then - * 1. chunkIndex will be set to 2 (as indices 80 - 120 reside in chunk[2]). - * 2. chunkStream[2] will be seeked to position 10 - * (= 90 - chunkOffset[2] (= 80)). - */ - @Override - public synchronized void seek(long pos) throws IOException { - if (!initialized) { - // Stream has not been initialized yet. Save the position so that it - // can be seeked when the stream is initialized. 
- blockPosition = pos; - return; - } - - checkOpen(); - if (pos < 0 || pos >= length) { - if (pos == 0) { - // It is possible for length and pos to be zero in which case - // seek should return instead of throwing exception - return; - } - throw new EOFException( - "EOF encountered at pos: " + pos + " for block: " + blockID); - } - - if (chunkIndex >= chunkStreams.size()) { - chunkIndex = Arrays.binarySearch(chunkOffsets, pos); - } else if (pos < chunkOffsets[chunkIndex]) { - chunkIndex = - Arrays.binarySearch(chunkOffsets, 0, chunkIndex, pos); - } else if (pos >= chunkOffsets[chunkIndex] + chunkStreams - .get(chunkIndex).getLength()) { - chunkIndex = Arrays.binarySearch(chunkOffsets, - chunkIndex + 1, chunkStreams.size(), pos); - } - if (chunkIndex < 0) { - // Binary search returns -insertionPoint - 1 if element is not present - // in the array. insertionPoint is the point at which element would be - // inserted in the sorted array. We need to adjust the chunkIndex - // accordingly so that chunkIndex = insertionPoint - 1 - chunkIndex = -chunkIndex - 2; - } - - // Reset the previous chunkStream's position - chunkStreams.get(chunkIndexOfPrevPosition).resetPosition(); - - // seek to the proper offset in the ChunkInputStream - chunkStreams.get(chunkIndex).seek(pos - chunkOffsets[chunkIndex]); - chunkIndexOfPrevPosition = chunkIndex; - } - - @Override - public synchronized long getPos() throws IOException { - if (length == 0) { - return 0; - } - - if (!initialized) { - // The stream is not initialized yet. Return the blockPosition - return blockPosition; - } else { - return chunkOffsets[chunkIndex] + chunkStreams.get(chunkIndex).getPos(); - } - } - - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; - } - - @Override - public synchronized void close() { - if (xceiverClientManager != null && xceiverClient != null) { - xceiverClientManager.releaseClient(xceiverClient, false); - xceiverClientManager = null; - xceiverClient = null; - } - } - - public synchronized void resetPosition() { - this.blockPosition = 0; - } - - /** - * Checks if the stream is open. If not, throw an exception. - * - * @throws IOException if stream is closed - */ - protected synchronized void checkOpen() throws IOException { - if (xceiverClient == null) { - throw new IOException("BlockInputStream has been closed."); - } - } - - public BlockID getBlockID() { - return blockID; - } - - public long getLength() { - return length; - } - - @VisibleForTesting - synchronized int getChunkIndex() { - return chunkIndex; - } - - @VisibleForTesting - synchronized long getBlockPosition() { - return blockPosition; - } - - @VisibleForTesting - synchronized List getChunkStreams() { - return chunkStreams; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java deleted file mode 100644 index b15ca3f6c85fc..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ /dev/null @@ -1,640 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
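The chunkOffsets/binarySearch arithmetic that seek() above relies on can be checked in isolation; the chunk length of 40 bytes and the seek position 90 mirror the example in the Javadoc, while the number of chunks is invented.

import java.util.Arrays;

public final class ChunkSeekExample {
  public static void main(String[] args) {
    // chunkOffsets[i] = block-relative offset of the first byte of chunk i.
    long[] chunkOffsets = {0, 40, 80, 120, 160, 200, 240, 280, 320, 360};
    long pos = 90;                                    // block-relative seek target

    int chunkIndex = Arrays.binarySearch(chunkOffsets, pos);
    if (chunkIndex < 0) {
      // binarySearch returns -(insertionPoint) - 1 when pos is not an exact
      // chunk boundary; the containing chunk is insertionPoint - 1.
      chunkIndex = -chunkIndex - 2;
    }
    long offsetInChunk = pos - chunkOffsets[chunkIndex];
    // Prints chunkIndex=2 offsetInChunk=10, matching seek(90) in the Javadoc example.
    System.out.println("chunkIndex=" + chunkIndex + " offsetInChunk=" + offsetInChunk);
  }
}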
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; -import org.apache.hadoop.hdds.client.BlockID; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicReference; - -import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls - .putBlockAsync; -import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls - .writeChunkAsync; - -/** - * An {@link OutputStream} used by the REST service in combination with the - * SCMClient to write the value of a key to a sequence - * of container chunks. Writes are buffered locally and periodically written to - * the container as a new chunk. In order to preserve the semantics that - * replacement of a pre-existing key is atomic, each instance of the stream has - * an internal unique identifier. This unique identifier and a monotonically - * increasing chunk index form a composite key that is used as the chunk name. - * After all data is written, a putKey call creates or updates the corresponding - * container key, and this call includes the full list of chunks that make up - * the key data. The list of chunks is updated all at once. Therefore, a - * concurrent reader never can see an intermediate state in which different - * chunks of data from different versions of the key data are interleaved. - * This class encapsulates all state management for buffering and writing - * through to the container. 
- */ -public class BlockOutputStream extends OutputStream { - public static final Logger LOG = - LoggerFactory.getLogger(BlockOutputStream.class); - - private volatile BlockID blockID; - - private final BlockData.Builder containerBlockData; - private XceiverClientManager xceiverClientManager; - private XceiverClientSpi xceiverClient; - private final ContainerProtos.ChecksumType checksumType; - private final int bytesPerChecksum; - private int chunkIndex; - private int chunkSize; - private final long streamBufferFlushSize; - private final long streamBufferMaxSize; - private BufferPool bufferPool; - // The IOException will be set by response handling thread in case there is an - // exception received in the response. If the exception is set, the next - // request will fail upfront. - private AtomicReference ioException; - private ExecutorService responseExecutor; - - // the effective length of data flushed so far - private long totalDataFlushedLength; - - // effective data write attempted so far for the block - private long writtenDataLength; - - // List containing buffers for which the putBlock call will - // update the length in the datanodes. This list will just maintain - // references to the buffers in the BufferPool which will be cleared - // when the watchForCommit acknowledges a putBlock logIndex has been - // committed on all datanodes. This list will be a place holder for buffers - // which got written between successive putBlock calls. - private List bufferList; - - // This object will maintain the commitIndexes and byteBufferList in order - // Also, corresponding to the logIndex, the corresponding list of buffers will - // be released from the buffer pool. - private final CommitWatcher commitWatcher; - - private List failedServers; - - /** - * Creates a new BlockOutputStream. 
- * - * @param blockID block ID - * @param xceiverClientManager client manager that controls client - * @param pipeline pipeline where block will be written - * @param chunkSize chunk size - * @param bufferPool pool of buffers - * @param streamBufferFlushSize flush size - * @param streamBufferMaxSize max size of the currentBuffer - * @param watchTimeout watch timeout - * @param checksumType checksum type - * @param bytesPerChecksum Bytes per checksum - */ - @SuppressWarnings("parameternumber") - public BlockOutputStream(BlockID blockID, - XceiverClientManager xceiverClientManager, Pipeline pipeline, - int chunkSize, long streamBufferFlushSize, long streamBufferMaxSize, - long watchTimeout, BufferPool bufferPool, ChecksumType checksumType, - int bytesPerChecksum) - throws IOException { - this.blockID = blockID; - this.chunkSize = chunkSize; - KeyValue keyValue = - KeyValue.newBuilder().setKey("TYPE").setValue("KEY").build(); - this.containerBlockData = - BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .addMetadata(keyValue); - this.xceiverClientManager = xceiverClientManager; - this.xceiverClient = xceiverClientManager.acquireClient(pipeline); - this.chunkIndex = 0; - this.streamBufferFlushSize = streamBufferFlushSize; - this.streamBufferMaxSize = streamBufferMaxSize; - this.bufferPool = bufferPool; - this.checksumType = checksumType; - this.bytesPerChecksum = bytesPerChecksum; - - // A single thread executor handle the responses of async requests - responseExecutor = Executors.newSingleThreadExecutor(); - commitWatcher = new CommitWatcher(bufferPool, xceiverClient, watchTimeout); - bufferList = null; - totalDataFlushedLength = 0; - writtenDataLength = 0; - failedServers = new ArrayList<>(0); - ioException = new AtomicReference<>(null); - } - - - public BlockID getBlockID() { - return blockID; - } - - public long getTotalAckDataLength() { - return commitWatcher.getTotalAckDataLength(); - } - - public long getWrittenDataLength() { - return writtenDataLength; - } - - public List getFailedServers() { - return failedServers; - } - - @VisibleForTesting - public XceiverClientSpi getXceiverClient() { - return xceiverClient; - } - - @VisibleForTesting - public long getTotalDataFlushedLength() { - return totalDataFlushedLength; - } - - @VisibleForTesting - public BufferPool getBufferPool() { - return bufferPool; - } - - public IOException getIoException() { - return ioException.get(); - } - - @VisibleForTesting - public Map> getCommitIndex2flushedDataMap() { - return commitWatcher.getCommitIndex2flushedDataMap(); - } - - @Override - public void write(int b) throws IOException { - checkOpen(); - byte[] buf = new byte[1]; - buf[0] = (byte) b; - write(buf, 0, 1); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - checkOpen(); - if (b == null) { - throw new NullPointerException(); - } - if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length) - || ((off + len) < 0)) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return; - } - - while (len > 0) { - int writeLen; - // Allocate a buffer if needed. The buffer will be allocated only - // once as needed and will be reused again for multiple blockOutputStream - // entries. 
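// A minimal, hypothetical sketch of the chunk-aligned split performed by the
// write loop below: writeLen = Math.min(chunkSize - pos % chunkSize, len)
// never lets a single buffered write cross a chunk boundary, and a full chunk
// triggers writeChunk(). The numbers are invented purely for illustration.
final class ChunkAlignedWriteSketch {
  public static void main(String[] args) {
    int chunkSize = 4;   // bytes per chunk (unrealistically small on purpose)
    int buffered = 3;    // bytes already sitting in the current chunk buffer
    int len = 6;         // bytes the caller asks to write
    while (len > 0) {
      int writeLen = Math.min(chunkSize - buffered % chunkSize, len);
      buffered += writeLen;
      len -= writeLen;
      System.out.println("buffered " + writeLen + " byte(s)"
          + (buffered % chunkSize == 0 ? ", chunk full -> writeChunk()" : ""));
    }
    // Prints: 1 byte (tops up the chunk), 4 bytes (a whole chunk), 1 byte left.
  }
}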
- ByteBuffer currentBuffer = bufferPool.allocateBufferIfNeeded(); - int pos = currentBuffer.position(); - writeLen = - Math.min(chunkSize - pos % chunkSize, len); - currentBuffer.put(b, off, writeLen); - if (!currentBuffer.hasRemaining()) { - writeChunk(currentBuffer); - } - off += writeLen; - len -= writeLen; - writtenDataLength += writeLen; - if (shouldFlush()) { - updateFlushLength(); - executePutBlock(); - } - // Data in the bufferPool can not exceed streamBufferMaxSize - if (isBufferPoolFull()) { - handleFullBuffer(); - } - } - } - - private boolean shouldFlush() { - return bufferPool.computeBufferData() % streamBufferFlushSize == 0; - } - - private void updateFlushLength() { - totalDataFlushedLength += writtenDataLength - totalDataFlushedLength; - } - - private boolean isBufferPoolFull() { - return bufferPool.computeBufferData() == streamBufferMaxSize; - } - /** - * Will be called on the retryPath in case closedContainerException/ - * TimeoutException. - * @param len length of data to write - * @throws IOException if error occurred - */ - - // In this case, the data is already cached in the currentBuffer. - public void writeOnRetry(long len) throws IOException { - if (len == 0) { - return; - } - int count = 0; - Preconditions.checkArgument(len <= streamBufferMaxSize); - while (len > 0) { - long writeLen; - writeLen = Math.min(chunkSize, len); - if (writeLen == chunkSize) { - writeChunk(bufferPool.getBuffer(count)); - } - len -= writeLen; - count++; - writtenDataLength += writeLen; - // we should not call isBufferFull/shouldFlush here. - // The buffer might already be full as whole data is already cached in - // the buffer. We should just validate - // if we wrote data of size streamBufferMaxSize/streamBufferFlushSize to - // call for handling full buffer/flush buffer condition. - if (writtenDataLength % streamBufferFlushSize == 0) { - // reset the position to zero as now we will be reading the - // next buffer in the list - updateFlushLength(); - executePutBlock(); - } - if (writtenDataLength == streamBufferMaxSize) { - handleFullBuffer(); - } - } - } - - /** - * This is a blocking call. It will wait for the flush till the commit index - * at the head of the commitIndex2flushedDataMap gets replicated to all or - * majority. - * @throws IOException - */ - private void handleFullBuffer() throws IOException { - try { - checkOpen(); - if (!commitWatcher.getFutureMap().isEmpty()) { - waitOnFlushFutures(); - } - } catch (InterruptedException | ExecutionException e) { - setIoException(e); - adjustBuffersOnException(); - throw getIoException(); - } - watchForCommit(true); - } - - - // It may happen that once the exception is encountered , we still might - // have successfully flushed up to a certain index. Make sure the buffers - // only contain data which have not been sufficiently replicated - private void adjustBuffersOnException() { - commitWatcher.releaseBuffersOnException(); - } - - /** - * calls watchForCommit API of the Ratis Client. For Standalone client, - * it is a no op. - * @param bufferFull flag indicating whether bufferFull condition is hit or - * its called as part flush/close - * @return minimum commit index replicated to all nodes - * @throws IOException IOException in case watch gets timed out - */ - private void watchForCommit(boolean bufferFull) throws IOException { - checkOpen(); - try { - XceiverClientReply reply = bufferFull ? 
- commitWatcher.watchOnFirstIndex() : commitWatcher.watchOnLastIndex(); - if (reply != null) { - List dnList = reply.getDatanodes(); - if (!dnList.isEmpty()) { - Pipeline pipe = xceiverClient.getPipeline(); - - LOG.warn("Failed to commit BlockId {} on {}. Failed nodes: {}", - blockID, pipe, dnList); - failedServers.addAll(dnList); - } - } - } catch (IOException ioe) { - setIoException(ioe); - throw getIoException(); - } - } - - private CompletableFuture executePutBlock() - throws IOException { - checkOpen(); - long flushPos = totalDataFlushedLength; - Preconditions.checkNotNull(bufferList); - List byteBufferList = bufferList; - bufferList = null; - Preconditions.checkNotNull(byteBufferList); - - CompletableFuture flushFuture; - try { - XceiverClientReply asyncReply = - putBlockAsync(xceiverClient, containerBlockData.build()); - CompletableFuture future = - asyncReply.getResponse(); - flushFuture = future.thenApplyAsync(e -> { - try { - validateResponse(e); - } catch (IOException sce) { - throw new CompletionException(sce); - } - // if the ioException is not set, putBlock is successful - if (getIoException() == null) { - BlockID responseBlockID = BlockID.getFromProtobuf( - e.getPutBlock().getCommittedBlockLength().getBlockID()); - Preconditions.checkState(blockID.getContainerBlockID() - .equals(responseBlockID.getContainerBlockID())); - // updates the bcsId of the block - blockID = responseBlockID; - if (LOG.isDebugEnabled()) { - LOG.debug( - "Adding index " + asyncReply.getLogIndex() + " commitMap size " - + commitWatcher.getCommitInfoMapSize() + " flushLength " - + flushPos + " numBuffers " + byteBufferList.size() - + " blockID " + blockID + " bufferPool size" + bufferPool - .getSize() + " currentBufferIndex " + bufferPool - .getCurrentBufferIndex()); - } - // for standalone protocol, logIndex will always be 0. - commitWatcher - .updateCommitInfoMap(asyncReply.getLogIndex(), byteBufferList); - } - return e; - }, responseExecutor).exceptionally(e -> { - if (LOG.isDebugEnabled()) { - LOG.debug( - "putBlock failed for blockID " + blockID + " with exception " + e - .getLocalizedMessage()); - } - CompletionException ce = new CompletionException(e); - setIoException(ce); - throw ce; - }); - } catch (IOException | InterruptedException | ExecutionException e) { - throw new IOException( - "Unexpected Storage Container Exception: " + e.toString(), e); - } - commitWatcher.getFutureMap().put(flushPos, flushFuture); - return flushFuture; - } - - @Override - public void flush() throws IOException { - if (xceiverClientManager != null && xceiverClient != null - && bufferPool != null && bufferPool.getSize() > 0) { - try { - handleFlush(); - } catch (InterruptedException | ExecutionException e) { - // just set the exception here as well in order to maintain sanctity of - // ioException field - setIoException(e); - adjustBuffersOnException(); - throw getIoException(); - } - } - } - - - private void writeChunk(ByteBuffer buffer) - throws IOException { - // This data in the buffer will be pushed to datanode and a reference will - // be added to the bufferList. Once putBlock gets executed, this list will - // be marked null. Hence, during first writeChunk call after every putBlock - // call or during the first call to writeChunk here, the list will be null. 
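// A hedged illustration (sizes invented) of the two buffering thresholds used
// in the write path above: every streamBufferFlushSize bytes of buffered data
// trigger an executePutBlock(), and once streamBufferMaxSize bytes are buffered
// but not yet acknowledged, handleFullBuffer() blocks on watchForCommit.
final class FlushThresholdSketch {
  public static void main(String[] args) {
    final long mb = 1024L * 1024L;
    final long chunkSize = mb, flushSize = 2 * mb, maxSize = 4 * mb;
    for (long buffered = chunkSize; buffered <= maxSize; buffered += chunkSize) {
      StringBuilder line = new StringBuilder(buffered / mb + " MB buffered");
      if (buffered % flushSize == 0) {
        line.append(" -> executePutBlock()");           // shouldFlush()
      }
      if (buffered == maxSize) {
        line.append(" -> handleFullBuffer() waits");    // isBufferPoolFull()
      }
      System.out.println(line);
    }
  }
}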
- - if (bufferList == null) { - bufferList = new ArrayList<>(); - } - bufferList.add(buffer); - // Please note : We are not flipping the slice when we write since - // the slices are pointing the currentBuffer start and end as needed for - // the chunk write. Also please note, Duplicate does not create a - // copy of data, it only creates metadata that points to the data - // stream. - ByteBuffer chunk = buffer.duplicate(); - chunk.position(0); - chunk.limit(buffer.position()); - writeChunkToContainer(chunk); - } - - private void handleFlush() - throws IOException, InterruptedException, ExecutionException { - checkOpen(); - // flush the last chunk data residing on the currentBuffer - if (totalDataFlushedLength < writtenDataLength) { - ByteBuffer currentBuffer = bufferPool.getCurrentBuffer(); - Preconditions.checkArgument(currentBuffer.position() > 0); - if (currentBuffer.position() != chunkSize) { - writeChunk(currentBuffer); - } - // This can be a partially filled chunk. Since we are flushing the buffer - // here, we just limit this buffer to the current position. So that next - // write will happen in new buffer - updateFlushLength(); - executePutBlock(); - } - waitOnFlushFutures(); - watchForCommit(false); - // just check again if the exception is hit while waiting for the - // futures to ensure flush has indeed succeeded - - // irrespective of whether the commitIndex2flushedDataMap is empty - // or not, ensure there is no exception set - checkOpen(); - } - - @Override - public void close() throws IOException { - if (xceiverClientManager != null && xceiverClient != null - && bufferPool != null && bufferPool.getSize() > 0) { - try { - handleFlush(); - } catch (InterruptedException | ExecutionException e) { - setIoException(e); - adjustBuffersOnException(); - throw getIoException(); - } finally { - cleanup(false); - } - // TODO: Turn the below buffer empty check on when Standalone pipeline - // is removed in the write path in tests - // Preconditions.checkArgument(buffer.position() == 0); - // bufferPool.checkBufferPoolEmpty(); - - } - } - - private void waitOnFlushFutures() - throws InterruptedException, ExecutionException { - CompletableFuture combinedFuture = CompletableFuture.allOf( - commitWatcher.getFutureMap().values().toArray( - new CompletableFuture[commitWatcher.getFutureMap().size()])); - // wait for all the transactions to complete - combinedFuture.get(); - } - - private void validateResponse( - ContainerProtos.ContainerCommandResponseProto responseProto) - throws IOException { - try { - // if the ioException is already set, it means a prev request has failed - // just throw the exception. 
The current operation will fail with the - // original error - IOException exception = getIoException(); - if (exception != null) { - throw exception; - } - ContainerProtocolCalls.validateContainerResponse(responseProto); - } catch (StorageContainerException sce) { - LOG.error("Unexpected Storage Container Exception: ", sce); - setIoException(sce); - throw sce; - } - } - - - private void setIoException(Exception e) { - if (getIoException() == null) { - IOException exception = new IOException( - "Unexpected Storage Container Exception: " + e.toString(), e); - ioException.compareAndSet(null, exception); - } - } - - public void cleanup(boolean invalidateClient) { - if (xceiverClientManager != null) { - xceiverClientManager.releaseClient(xceiverClient, invalidateClient); - } - xceiverClientManager = null; - xceiverClient = null; - commitWatcher.cleanup(); - if (bufferList != null) { - bufferList.clear(); - } - bufferList = null; - responseExecutor.shutdown(); - } - - /** - * Checks if the stream is open or exception has occured. - * If not, throws an exception. - * - * @throws IOException if stream is closed - */ - private void checkOpen() throws IOException { - if (isClosed()) { - throw new IOException("BlockOutputStream has been closed."); - } else if (getIoException() != null) { - adjustBuffersOnException(); - throw getIoException(); - } - } - - public boolean isClosed() { - return xceiverClient == null; - } - - /** - * Writes buffered data as a new chunk to the container and saves chunk - * information to be used later in putKey call. - * - * @throws IOException if there is an I/O error while performing the call - * @throws OzoneChecksumException if there is an error while computing - * checksum - */ - private void writeChunkToContainer(ByteBuffer chunk) throws IOException { - int effectiveChunkSize = chunk.remaining(); - ByteString data = bufferPool.byteStringConversion().apply(chunk); - Checksum checksum = new Checksum(checksumType, bytesPerChecksum); - ChecksumData checksumData = checksum.computeChecksum(chunk); - ChunkInfo chunkInfo = ChunkInfo.newBuilder() - .setChunkName(blockID.getLocalID() + "_chunk_" + ++chunkIndex) - .setOffset(0) - .setLen(effectiveChunkSize) - .setChecksumData(checksumData.getProtoBufMessage()) - .build(); - - try { - XceiverClientReply asyncReply = - writeChunkAsync(xceiverClient, chunkInfo, blockID, data); - CompletableFuture future = - asyncReply.getResponse(); - future.thenApplyAsync(e -> { - try { - validateResponse(e); - } catch (IOException sce) { - future.completeExceptionally(sce); - } - return e; - }, responseExecutor).exceptionally(e -> { - if (LOG.isDebugEnabled()) { - LOG.debug( - "writing chunk failed " + chunkInfo.getChunkName() + " blockID " - + blockID + " with exception " + e.getLocalizedMessage()); - } - CompletionException ce = new CompletionException(e); - setIoException(ce); - throw ce; - }); - } catch (IOException | InterruptedException | ExecutionException e) { - throw new IOException( - "Unexpected Storage Container Exception: " + e.toString(), e); - } - if (LOG.isDebugEnabled()) { - LOG.debug( - "writing chunk " + chunkInfo.getChunkName() + " blockID " + blockID - + " length " + effectiveChunkSize); - } - containerBlockData.addChunks(chunkInfo); - } - - @VisibleForTesting - public void setXceiverClient(XceiverClientSpi xceiverClient) { - this.xceiverClient = xceiverClient; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java deleted file mode 100644 index 6d534579c8605..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.ByteStringConversion; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Function; - -/** - * This class creates and manages pool of n buffers. - */ -public class BufferPool { - - private List bufferList; - private int currentBufferIndex; - private final int bufferSize; - private final int capacity; - private final Function byteStringConversion; - - public BufferPool(int bufferSize, int capacity) { - this(bufferSize, capacity, - ByteStringConversion.createByteBufferConversion(null)); - } - - public BufferPool(int bufferSize, int capacity, - Function byteStringConversion){ - this.capacity = capacity; - this.bufferSize = bufferSize; - bufferList = new ArrayList<>(capacity); - currentBufferIndex = -1; - this.byteStringConversion = byteStringConversion; - } - - public Function byteStringConversion(){ - return byteStringConversion; - } - - public ByteBuffer getCurrentBuffer() { - return currentBufferIndex == -1 ? null : bufferList.get(currentBufferIndex); - } - - /** - * If the currentBufferIndex is less than the buffer size - 1, - * it means, the next buffer in the list has been freed up for - * rewriting. Reuse the next available buffer in such cases. - * - * In case, the currentBufferIndex == buffer.size and buffer size is still - * less than the capacity to be allocated, just allocate a buffer of size - * chunk size. - * - */ - public ByteBuffer allocateBufferIfNeeded() { - ByteBuffer buffer = getCurrentBuffer(); - if (buffer != null && buffer.hasRemaining()) { - return buffer; - } - if (currentBufferIndex < bufferList.size() - 1) { - buffer = getBuffer(currentBufferIndex + 1); - } else { - buffer = ByteBuffer.allocate(bufferSize); - bufferList.add(buffer); - } - Preconditions.checkArgument(bufferList.size() <= capacity); - currentBufferIndex++; - // TODO: Turn the below precondition check on when Standalone pipeline - // is removed in the write path in tests - // Preconditions.checkArgument(buffer.position() == 0); - return buffer; - } - - public void releaseBuffer(ByteBuffer byteBuffer) { - // always remove from head of the list and append at last - ByteBuffer buffer = bufferList.remove(0); - // Ensure the buffer to be removed is always at the head of the list. 
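// A small usage sketch of the recycling described above, assuming the
// BufferPool class defined in this file (buffer sizes are arbitrary). In the
// real write path releaseBuffer() is driven by CommitWatcher once data is
// replicated; it is called directly here only to show the reuse semantics.
final class BufferPoolUsageSketch {
  public static void main(String[] args) {
    BufferPool pool = new BufferPool(4096, 2);                  // 2 buffers of 4 KB
    java.nio.ByteBuffer first = pool.allocateBufferIfNeeded();
    first.put(new byte[4096]);                                  // fill it completely
    java.nio.ByteBuffer second = pool.allocateBufferIfNeeded(); // second buffer
    second.put((byte) 1);
    // ...once the data held in 'first' has been acknowledged downstream:
    pool.releaseBuffer(first);       // cleared and appended to the tail for reuse
    System.out.println("buffers allocated: " + pool.getSize()); // prints 2
  }
}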
- Preconditions.checkArgument(buffer.equals(byteBuffer)); - buffer.clear(); - bufferList.add(buffer); - Preconditions.checkArgument(currentBufferIndex >= 0); - currentBufferIndex--; - } - - public void clearBufferPool() { - bufferList.clear(); - currentBufferIndex = -1; - } - - public void checkBufferPoolEmpty() { - Preconditions.checkArgument(computeBufferData() == 0); - } - - public long computeBufferData() { - return bufferList.stream().mapToInt(value -> value.position()) - .sum(); - } - - public int getSize() { - return bufferList.size(); - } - - public ByteBuffer getBuffer(int index) { - return bufferList.get(index); - } - - int getCurrentBufferIndex() { - return currentBufferIndex; - } - -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java deleted file mode 100644 index f94d2d87340be..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java +++ /dev/null @@ -1,544 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.Seekable; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; - -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * An {@link InputStream} called from BlockInputStream to read a chunk from the - * container. Each chunk may contain multiple underlying {@link ByteBuffer} - * instances. 
- */ -public class ChunkInputStream extends InputStream implements Seekable { - - private ChunkInfo chunkInfo; - private final long length; - private final BlockID blockID; - private XceiverClientSpi xceiverClient; - private boolean verifyChecksum; - private boolean allocated = false; - - // Buffer to store the chunk data read from the DN container - private List buffers; - - // Index of the buffers corresponding to the current position of the buffers - private int bufferIndex; - - // The offset of the current data residing in the buffers w.r.t the start - // of chunk data - private long bufferOffset; - - // The number of bytes of chunk data residing in the buffers currently - private long bufferLength; - - // Position of the ChunkInputStream is maintained by this variable (if a - // seek is performed. This position is w.r.t to the chunk only and not the - // block or key. This variable is set only if either the buffers are not - // yet allocated or the if the allocated buffers do not cover the seeked - // position. Once the chunk is read, this variable is reset. - private long chunkPosition = -1; - - private static final int EOF = -1; - - ChunkInputStream(ChunkInfo chunkInfo, BlockID blockId, - XceiverClientSpi xceiverClient, boolean verifyChecksum) { - this.chunkInfo = chunkInfo; - this.length = chunkInfo.getLen(); - this.blockID = blockId; - this.xceiverClient = xceiverClient; - this.verifyChecksum = verifyChecksum; - } - - public synchronized long getRemaining() throws IOException { - return length - getPos(); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized int read() throws IOException { - checkOpen(); - int available = prepareRead(1); - int dataout = EOF; - - if (available == EOF) { - // There is no more data in the chunk stream. The buffers should have - // been released by now - Preconditions.checkState(buffers == null); - } else { - dataout = Byte.toUnsignedInt(buffers.get(bufferIndex).get()); - } - - if (chunkStreamEOF()) { - // consumer might use getPos to determine EOF, - // so release buffers when serving the last byte of data - releaseBuffers(); - } - - return dataout; - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized int read(byte[] b, int off, int len) throws IOException { - // According to the JavaDocs for InputStream, it is recommended that - // subclasses provide an override of bulk read if possible for performance - // reasons. In addition to performance, we need to do it for correctness - // reasons. The Ozone REST service uses PipedInputStream and - // PipedOutputStream to relay HTTP response data between a Jersey thread and - // a Netty thread. It turns out that PipedInputStream/PipedOutputStream - // have a subtle dependency (bug?) on the wrapped stream providing separate - // implementations of single-byte read and bulk read. Without this, get key - // responses might close the connection before writing all of the bytes - // advertised in the Content-Length. - if (b == null) { - throw new NullPointerException(); - } - if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return 0; - } - checkOpen(); - int total = 0; - while (len > 0) { - int available = prepareRead(len); - if (available == EOF) { - // There is no more data in the chunk stream. The buffers should have - // been released by now - Preconditions.checkState(buffers == null); - return total != 0 ? 
total : EOF; - } - buffers.get(bufferIndex).get(b, off + total, available); - len -= available; - total += available; - } - - if (chunkStreamEOF()) { - // smart consumers determine EOF by calling getPos() - // so we release buffers when serving the final bytes of data - releaseBuffers(); - } - - return total; - } - - /** - * Seeks the ChunkInputStream to the specified position. This is done by - * updating the chunkPosition to the seeked position in case the buffers - * are not allocated or buffers do not contain the data corresponding to - * the seeked position (determined by buffersHavePosition()). Otherwise, - * the buffers position is updated to the seeked position. - */ - @Override - public synchronized void seek(long pos) throws IOException { - if (pos < 0 || pos >= length) { - if (pos == 0) { - // It is possible for length and pos to be zero in which case - // seek should return instead of throwing exception - return; - } - throw new EOFException("EOF encountered at pos: " + pos + " for chunk: " - + chunkInfo.getChunkName()); - } - - if (buffersHavePosition(pos)) { - // The bufferPosition is w.r.t the current chunk. - // Adjust the bufferIndex and position to the seeked position. - adjustBufferPosition(pos - bufferOffset); - } else { - chunkPosition = pos; - } - } - - @Override - public synchronized long getPos() throws IOException { - if (chunkPosition >= 0) { - return chunkPosition; - } - if (chunkStreamEOF()) { - return length; - } - if (buffersHaveData()) { - return bufferOffset + buffers.get(bufferIndex).position(); - } - if (buffersAllocated()) { - return bufferOffset + bufferLength; - } - return 0; - } - - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; - } - - @Override - public synchronized void close() { - if (xceiverClient != null) { - xceiverClient = null; - } - } - - /** - * Checks if the stream is open. If not, throw an exception. - * - * @throws IOException if stream is closed - */ - protected synchronized void checkOpen() throws IOException { - if (xceiverClient == null) { - throw new IOException("BlockInputStream has been closed."); - } - } - - /** - * Prepares to read by advancing through buffers or allocating new buffers, - * as needed until it finds data to return, or encounters EOF. - * @param len desired lenght of data to read - * @return length of data available to read, possibly less than desired length - */ - private synchronized int prepareRead(int len) throws IOException { - for (;;) { - if (chunkPosition >= 0) { - if (buffersHavePosition(chunkPosition)) { - // The current buffers have the seeked position. Adjust the buffer - // index and position to point to the chunkPosition. - adjustBufferPosition(chunkPosition - bufferOffset); - } else { - // Read a required chunk data to fill the buffers with seeked - // position data - readChunkFromContainer(len); - } - } - if (buffersHaveData()) { - // Data is available from buffers - ByteBuffer bb = buffers.get(bufferIndex); - return len > bb.remaining() ? bb.remaining() : len; - } else if (dataRemainingInChunk()) { - // There is more data in the chunk stream which has not - // been read into the buffers yet. - readChunkFromContainer(len); - } else { - // All available input from this chunk stream has been consumed. - return EOF; - } - } - } - - /** - * Reads full or partial Chunk from DN Container based on the current - * position of the ChunkInputStream, the number of bytes of data to read - * and the checksum boundaries. 
- * If successful, then the read data in saved in the buffers so that - * subsequent read calls can utilize it. - * @param len number of bytes of data to be read - * @throws IOException if there is an I/O error while performing the call - * to Datanode - */ - private synchronized void readChunkFromContainer(int len) throws IOException { - - // index of first byte to be read from the chunk - long startByteIndex; - if (chunkPosition >= 0) { - // If seek operation was called to advance the buffer position, the - // chunk should be read from that position onwards. - startByteIndex = chunkPosition; - } else { - // Start reading the chunk from the last chunkPosition onwards. - startByteIndex = bufferOffset + bufferLength; - } - - if (verifyChecksum) { - // Update the bufferOffset and bufferLength as per the checksum - // boundary requirement. - computeChecksumBoundaries(startByteIndex, len); - } else { - // Read from the startByteIndex - bufferOffset = startByteIndex; - bufferLength = len; - } - - // Adjust the chunkInfo so that only the required bytes are read from - // the chunk. - final ChunkInfo adjustedChunkInfo = ChunkInfo.newBuilder(chunkInfo) - .setOffset(bufferOffset) - .setLen(bufferLength) - .build(); - - ByteString byteString = readChunk(adjustedChunkInfo); - - buffers = byteString.asReadOnlyByteBufferList(); - bufferIndex = 0; - allocated = true; - - // If the stream was seeked to position before, then the buffer - // position should be adjusted as the reads happen at checksum boundaries. - // The buffers position might need to be adjusted for the following - // scenarios: - // 1. Stream was seeked to a position before the chunk was read - // 2. Chunk was read from index < the current position to account for - // checksum boundaries. - adjustBufferPosition(startByteIndex - bufferOffset); - } - - /** - * Send RPC call to get the chunk from the container. - */ - @VisibleForTesting - protected ByteString readChunk(ChunkInfo readChunkInfo) throws IOException { - ReadChunkResponseProto readChunkResponse; - - try { - List validators = - ContainerProtocolCalls.getValidatorList(); - validators.add(validator); - - readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient, - readChunkInfo, blockID, validators); - - } catch (IOException e) { - if (e instanceof StorageContainerException) { - throw e; - } - throw new IOException("Unexpected OzoneException: " + e.toString(), e); - } - - return readChunkResponse.getData(); - } - - private CheckedBiFunction validator = - (request, response) -> { - final ChunkInfo reqChunkInfo = - request.getReadChunk().getChunkData(); - - ReadChunkResponseProto readChunkResponse = response.getReadChunk(); - ByteString byteString = readChunkResponse.getData(); - - if (byteString.size() != reqChunkInfo.getLen()) { - // Bytes read from chunk should be equal to chunk size. - throw new OzoneChecksumException(String - .format("Inconsistent read for chunk=%s len=%d bytesRead=%d", - reqChunkInfo.getChunkName(), reqChunkInfo.getLen(), - byteString.size())); - } - - if (verifyChecksum) { - ChecksumData checksumData = ChecksumData.getFromProtoBuf( - chunkInfo.getChecksumData()); - - // ChecksumData stores checksum for each 'numBytesPerChecksum' - // number of bytes in a list. 
Compute the index of the first - // checksum to match with the read data - - int checkumStartIndex = (int) (reqChunkInfo.getOffset() / - checksumData.getBytesPerChecksum()); - Checksum.verifyChecksum( - byteString, checksumData, checkumStartIndex); - } - }; - - /** - * Return the offset and length of bytes that need to be read from the - * chunk file to cover the checksum boundaries covering the actual start and - * end of the chunk index to be read. - * For example, lets say the client is reading from index 120 to 450 in the - * chunk. And let's say checksum is stored for every 100 bytes in the chunk - * i.e. the first checksum is for bytes from index 0 to 99, the next for - * bytes from index 100 to 199 and so on. To verify bytes from 120 to 450, - * we would need to read from bytes 100 to 499 so that checksum - * verification can be done. - * - * @param startByteIndex the first byte index to be read by client - * @param dataLen number of bytes to be read from the chunk - */ - private void computeChecksumBoundaries(long startByteIndex, int dataLen) { - - int bytesPerChecksum = chunkInfo.getChecksumData().getBytesPerChecksum(); - // index of the last byte to be read from chunk, inclusively. - final long endByteIndex = startByteIndex + dataLen - 1; - - bufferOffset = (startByteIndex / bytesPerChecksum) - * bytesPerChecksum; // inclusive - final long endIndex = ((endByteIndex / bytesPerChecksum) + 1) - * bytesPerChecksum; // exclusive - bufferLength = Math.min(endIndex, length) - bufferOffset; - } - - /** - * Adjust the buffers position to account for seeked position and/ or checksum - * boundary reads. - * @param bufferPosition the position to which the buffers must be advanced - */ - private void adjustBufferPosition(long bufferPosition) { - // The bufferPosition is w.r.t the current chunk. - // Adjust the bufferIndex and position to the seeked chunkPosition. - long tempOffest = 0; - for (int i = 0; i < buffers.size(); i++) { - if (bufferPosition - tempOffest >= buffers.get(i).capacity()) { - tempOffest += buffers.get(i).capacity(); - } else { - bufferIndex = i; - break; - } - } - buffers.get(bufferIndex).position((int) (bufferPosition - tempOffest)); - - // Reset the chunkPosition as chunk stream has been initialized i.e. the - // buffers have been allocated. - resetPosition(); - } - - /** - * Check if the buffers have been allocated data and false otherwise. - */ - private boolean buffersAllocated() { - return buffers != null && !buffers.isEmpty(); - } - - /** - * Check if the buffers have any data remaining between the current - * position and the limit. - */ - private boolean buffersHaveData() { - boolean hasData = false; - - if (buffersAllocated()) { - while (bufferIndex < (buffers.size())) { - if (buffers.get(bufferIndex).hasRemaining()) { - // current buffer has data - hasData = true; - break; - } else { - if (buffersRemaining()) { - // move to next available buffer - ++bufferIndex; - Preconditions.checkState(bufferIndex < buffers.size()); - } else { - // no more buffers remaining - break; - } - } - } - } - - return hasData; - } - - private boolean buffersRemaining() { - return (bufferIndex < (buffers.size() - 1)); - } - - /** - * Check if curernt buffers have the data corresponding to the input position. 
- */ - private boolean buffersHavePosition(long pos) { - // Check if buffers have been allocated - if (buffersAllocated()) { - // Check if the current buffers cover the input position - return pos >= bufferOffset && - pos < bufferOffset + bufferLength; - } - return false; - } - - /** - * Check if there is more data in the chunk which has not yet been read - * into the buffers. - */ - private boolean dataRemainingInChunk() { - long bufferPos; - if (chunkPosition >= 0) { - bufferPos = chunkPosition; - } else { - bufferPos = bufferOffset + bufferLength; - } - - return bufferPos < length; - } - - /** - * Check if end of chunkStream has been reached. - */ - private boolean chunkStreamEOF() { - if (!allocated) { - // Chunk data has not been read yet - return false; - } - - if (buffersHaveData() || dataRemainingInChunk()) { - return false; - } else { - Preconditions.checkState(bufferOffset + bufferLength == length, - "EOF detected, but not at the last byte of the chunk"); - return true; - } - } - - /** - * If EOF is reached, release the buffers. - */ - private void releaseBuffers() { - buffers = null; - bufferIndex = 0; - } - - /** - * Reset the chunkPosition once the buffers are allocated. - */ - void resetPosition() { - this.chunkPosition = -1; - } - - String getChunkName() { - return chunkInfo.getChunkName(); - } - - protected long getLength() { - return length; - } - - @VisibleForTesting - protected long getChunkPosition() { - return chunkPosition; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java deleted file mode 100644 index 1d9d55bfbfbb6..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This class maintains the map of the commitIndexes to be watched for - * successful replication in the datanodes in a given pipeline. It also releases - * the buffers associated with the user data back to {@Link BufferPool} once - * minimum replication criteria is achieved during an ozone key write. 
- */ -package org.apache.hadoop.hdds.scm.storage; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.ExecutionException; - -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.stream.Collectors; - -/** - * This class executes watchForCommit on ratis pipeline and releases - * buffers once data successfully gets replicated. - */ -public class CommitWatcher { - - private static final Logger LOG = - LoggerFactory.getLogger(CommitWatcher.class); - - // A reference to the pool of buffers holding the data - private BufferPool bufferPool; - - // The map should maintain the keys (logIndexes) in order so that while - // removing we always end up updating incremented data flushed length. - // Also, corresponding to the logIndex, the corresponding list of buffers will - // be released from the buffer pool. - private ConcurrentSkipListMap> - commitIndex2flushedDataMap; - - // future Map to hold up all putBlock futures - private ConcurrentHashMap> - futureMap; - - private XceiverClientSpi xceiverClient; - - private final long watchTimeout; - - // total data which has been successfully flushed and acknowledged - // by all servers - private long totalAckDataLength; - - public CommitWatcher(BufferPool bufferPool, XceiverClientSpi xceiverClient, - long watchTimeout) { - this.bufferPool = bufferPool; - this.xceiverClient = xceiverClient; - this.watchTimeout = watchTimeout; - commitIndex2flushedDataMap = new ConcurrentSkipListMap<>(); - totalAckDataLength = 0; - futureMap = new ConcurrentHashMap<>(); - } - - /** - * just update the totalAckDataLength. In case of failure, - * we will read the data starting from totalAckDataLength. - */ - private long releaseBuffers(List indexes) { - Preconditions.checkArgument(!commitIndex2flushedDataMap.isEmpty()); - for (long index : indexes) { - Preconditions.checkState(commitIndex2flushedDataMap.containsKey(index)); - List buffers = commitIndex2flushedDataMap.remove(index); - long length = buffers.stream().mapToLong(value -> { - int pos = value.position(); - return pos; - }).sum(); - totalAckDataLength += length; - // clear the future object from the future Map - Preconditions.checkNotNull(futureMap.remove(totalAckDataLength)); - for (ByteBuffer byteBuffer : buffers) { - bufferPool.releaseBuffer(byteBuffer); - } - } - return totalAckDataLength; - } - - public void updateCommitInfoMap(long index, List byteBufferList) { - commitIndex2flushedDataMap - .put(index, byteBufferList); - } - - int getCommitInfoMapSize() { - return commitIndex2flushedDataMap.size(); - } - - /** - * Calls watch for commit for the first index in commitIndex2flushedDataMap to - * the Ratis client. 
- * @return reply reply from raft client - * @throws IOException in case watchForCommit fails - */ - public XceiverClientReply watchOnFirstIndex() throws IOException { - if (!commitIndex2flushedDataMap.isEmpty()) { - // wait for the first commit index in the commitIndex2flushedDataMap - // to get committed to all or majority of nodes in case timeout - // happens. - long index = - commitIndex2flushedDataMap.keySet().stream().mapToLong(v -> v).min() - .getAsLong(); - if (LOG.isDebugEnabled()) { - LOG.debug("waiting for first index " + index + " to catch up"); - } - return watchForCommit(index); - } else { - return null; - } - } - - /** - * Calls watch for commit for the first index in commitIndex2flushedDataMap to - * the Ratis client. - * @return reply reply from raft client - * @throws IOException in case watchForCommit fails - */ - public XceiverClientReply watchOnLastIndex() - throws IOException { - if (!commitIndex2flushedDataMap.isEmpty()) { - // wait for the commit index in the commitIndex2flushedDataMap - // to get committed to all or majority of nodes in case timeout - // happens. - long index = - commitIndex2flushedDataMap.keySet().stream().mapToLong(v -> v).max() - .getAsLong(); - if (LOG.isDebugEnabled()) { - LOG.debug("waiting for last flush Index " + index + " to catch up"); - } - return watchForCommit(index); - } else { - return null; - } - } - - - private void adjustBuffers(long commitIndex) { - List keyList = commitIndex2flushedDataMap.keySet().stream() - .filter(p -> p <= commitIndex).collect(Collectors.toList()); - if (keyList.isEmpty()) { - return; - } else { - releaseBuffers(keyList); - } - } - - // It may happen that once the exception is encountered , we still might - // have successfully flushed up to a certain index. Make sure the buffers - // only contain data which have not been sufficiently replicated - void releaseBuffersOnException() { - adjustBuffers(xceiverClient.getReplicatedMinCommitIndex()); - } - - - /** - * calls watchForCommit API of the Ratis Client. For Standalone client, - * it is a no op. 
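// An illustrative, runnable trace (log indexes made up) of the bookkeeping in
// this class: a sorted map from Ratis log index to the buffers flushed for it.
// watchOnFirstIndex() waits on the smallest key, watchOnLastIndex() on the
// largest, and adjustBuffers(commitIndex) releases every entry whose key is
// less than or equal to the acknowledged index. ConcurrentSkipListMap stands in
// for commitIndex2flushedDataMap here.
final class CommitIndexSketch {
  public static void main(String[] args) {
    java.util.concurrent.ConcurrentSkipListMap<Long, String> flushed =
        new java.util.concurrent.ConcurrentSkipListMap<>();
    flushed.put(100L, "buffers for log index 100");
    flushed.put(105L, "buffers for log index 105");
    flushed.put(110L, "buffers for log index 110");
    long first = flushed.firstKey();   // watchOnFirstIndex() waits on 100
    long last = flushed.lastKey();     // watchOnLastIndex() waits on 110
    long committed = 105L;             // pretend the pipeline acknowledged 105
    flushed.headMap(committed, true).clear();   // adjustBuffers(105)
    System.out.println("first=" + first + " last=" + last
        + " remaining=" + flushed.keySet());    // remaining=[110]
  }
}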
- * @param commitIndex log index to watch for - * @return minimum commit index replicated to all nodes - * @throws IOException IOException in case watch gets timed out - */ - public XceiverClientReply watchForCommit(long commitIndex) - throws IOException { - long index; - try { - XceiverClientReply reply = - xceiverClient.watchForCommit(commitIndex, watchTimeout); - if (reply == null) { - index = 0; - } else { - index = reply.getLogIndex(); - } - adjustBuffers(index); - return reply; - } catch (TimeoutException | InterruptedException | ExecutionException e) { - LOG.warn("watchForCommit failed for index " + commitIndex, e); - IOException ioException = new IOException( - "Unexpected Storage Container Exception: " + e.toString(), e); - releaseBuffersOnException(); - throw ioException; - } - } - - @VisibleForTesting - public ConcurrentSkipListMap> getCommitIndex2flushedDataMap() { - return commitIndex2flushedDataMap; - } - - public ConcurrentHashMap> getFutureMap() { - return futureMap; - } - - public long getTotalAckDataLength() { - return totalAckDataLength; - } - - public void cleanup() { - if (commitIndex2flushedDataMap != null) { - commitIndex2flushedDataMap.clear(); - } - if (futureMap != null) { - futureMap.clear(); - } - commitIndex2flushedDataMap = null; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java deleted file mode 100644 index 6e7ce948784d0..0000000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -/** - * Low level IO streams to upload/download chunks from container service. - */ \ No newline at end of file diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java deleted file mode 100644 index 042bfd941743e..0000000000000 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -import com.google.common.primitives.Bytes; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.security.token.Token; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.EOFException; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; - -import static org.apache.hadoop.hdds.scm.storage.TestChunkInputStream.generateRandomData; - -/** - * Tests for {@link BlockInputStream}'s functionality. - */ -public class TestBlockInputStream { - - private static final int CHUNK_SIZE = 100; - private static Checksum checksum; - - private BlockInputStream blockStream; - private byte[] blockData; - private int blockSize; - private List chunks; - private Map chunkDataMap; - - @Before - public void setup() throws Exception { - BlockID blockID = new BlockID(new ContainerBlockID(1, 1)); - checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE); - createChunkList(5); - - blockStream = new DummyBlockInputStream(blockID, blockSize, null, null, - false, null); - } - - /** - * Create a mock list of chunks. The first n-1 chunks of length CHUNK_SIZE - * and the last chunk with length CHUNK_SIZE/2. - */ - private void createChunkList(int numChunks) - throws Exception { - - chunks = new ArrayList<>(numChunks); - chunkDataMap = new HashMap<>(); - blockData = new byte[0]; - int i, chunkLen; - byte[] byteData; - String chunkName; - - for (i = 0; i < numChunks; i++) { - chunkName = "chunk-" + i; - chunkLen = CHUNK_SIZE; - if (i == numChunks - 1) { - chunkLen = CHUNK_SIZE / 2; - } - byteData = generateRandomData(chunkLen); - ChunkInfo chunkInfo = ChunkInfo.newBuilder() - .setChunkName(chunkName) - .setOffset(0) - .setLen(chunkLen) - .setChecksumData(checksum.computeChecksum( - byteData, 0, chunkLen).getProtoBufMessage()) - .build(); - - chunkDataMap.put(chunkName, byteData); - chunks.add(chunkInfo); - - blockSize += chunkLen; - blockData = Bytes.concat(blockData, byteData); - } - } - - /** - * A dummy BlockInputStream to mock read block call to DN. 
- */ - private class DummyBlockInputStream extends BlockInputStream { - - DummyBlockInputStream(BlockID blockId, - long blockLen, - Pipeline pipeline, - Token token, - boolean verifyChecksum, - XceiverClientManager xceiverClientManager) { - super(blockId, blockLen, pipeline, token, verifyChecksum, - xceiverClientManager); - } - - @Override - protected List getChunkInfos() { - return chunks; - } - - @Override - protected void addStream(ChunkInfo chunkInfo) { - TestChunkInputStream testChunkInputStream = new TestChunkInputStream(); - getChunkStreams().add(testChunkInputStream.new DummyChunkInputStream( - chunkInfo, null, null, false, - chunkDataMap.get(chunkInfo.getChunkName()).clone())); - } - - @Override - protected synchronized void checkOpen() throws IOException { - // No action needed - } - } - - private void seekAndVerify(int pos) throws Exception { - blockStream.seek(pos); - Assert.assertEquals("Current position of buffer does not match with the " + - "seeked position", pos, blockStream.getPos()); - } - - /** - * Match readData with the chunkData byte-wise. - * @param readData Data read through ChunkInputStream - * @param inputDataStartIndex first index (inclusive) in chunkData to compare - * with read data - * @param length the number of bytes of data to match starting from - * inputDataStartIndex - */ - private void matchWithInputData(byte[] readData, int inputDataStartIndex, - int length) { - for (int i = inputDataStartIndex; i < inputDataStartIndex + length; i++) { - Assert.assertEquals(blockData[i], readData[i - inputDataStartIndex]); - } - } - - @Test - public void testSeek() throws Exception { - // Seek to position 0 - int pos = 0; - seekAndVerify(pos); - Assert.assertEquals("ChunkIndex is incorrect", 0, - blockStream.getChunkIndex()); - - // Before BlockInputStream is initialized (initialization happens during - // read operation), seek should update the BlockInputStream#blockPosition - pos = CHUNK_SIZE; - seekAndVerify(pos); - Assert.assertEquals("ChunkIndex is incorrect", 0, - blockStream.getChunkIndex()); - Assert.assertEquals(pos, blockStream.getBlockPosition()); - - // Initialize the BlockInputStream. After initializtion, the chunkIndex - // should be updated to correspond to the seeked position. - blockStream.initialize(); - Assert.assertEquals("ChunkIndex is incorrect", 1, - blockStream.getChunkIndex()); - - pos = (CHUNK_SIZE * 4) + 5; - seekAndVerify(pos); - Assert.assertEquals("ChunkIndex is incorrect", 4, - blockStream.getChunkIndex()); - - try { - // Try seeking beyond the blockSize. - pos = blockSize + 10; - seekAndVerify(pos); - Assert.fail("Seek to position beyond block size should fail."); - } catch (EOFException e) { - System.out.println(e); - } - - // Seek to random positions between 0 and the block size. - Random random = new Random(); - for (int i = 0; i < 10; i++) { - pos = random.nextInt(blockSize); - seekAndVerify(pos); - } - } - - @Test - public void testRead() throws Exception { - // read 200 bytes of data starting from position 50. Chunk0 contains - // indices 0 to 99, chunk1 from 100 to 199 and chunk3 from 200 to 299. So - // the read should result in 3 ChunkInputStream reads - seekAndVerify(50); - byte[] b = new byte[200]; - blockStream.read(b, 0, 200); - matchWithInputData(b, 50, 200); - - // The new position of the blockInputStream should be the last index read - // + 1. 
- Assert.assertEquals(250, blockStream.getPos()); - Assert.assertEquals(2, blockStream.getChunkIndex()); - } - - @Test - public void testSeekAndRead() throws Exception { - // Seek to a position and read data - seekAndVerify(50); - byte[] b1 = new byte[100]; - blockStream.read(b1, 0, 100); - matchWithInputData(b1, 50, 100); - - // Next read should start from the position of the last read + 1 i.e. 100 - byte[] b2 = new byte[100]; - blockStream.read(b2, 0, 100); - matchWithInputData(b2, 150, 100); - } -} diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java deleted file mode 100644 index a5fe26b5619ab..0000000000000 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.EOFException; -import java.util.ArrayList; -import java.util.List; -import java.util.Random; - -/** - * Tests for {@link ChunkInputStream}'s functionality. 
- */ -public class TestChunkInputStream { - - private static final int CHUNK_SIZE = 100; - private static final int BYTES_PER_CHECKSUM = 20; - private static final String CHUNK_NAME = "dummyChunk"; - private static final Random RANDOM = new Random(); - private static Checksum checksum; - - private DummyChunkInputStream chunkStream; - private ChunkInfo chunkInfo; - private byte[] chunkData; - - @Before - public void setup() throws Exception { - checksum = new Checksum(ChecksumType.valueOf( - OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT), - BYTES_PER_CHECKSUM); - - chunkData = generateRandomData(CHUNK_SIZE); - - chunkInfo = ChunkInfo.newBuilder() - .setChunkName(CHUNK_NAME) - .setOffset(0) - .setLen(CHUNK_SIZE) - .setChecksumData(checksum.computeChecksum( - chunkData, 0, CHUNK_SIZE).getProtoBufMessage()) - .build(); - - chunkStream = new DummyChunkInputStream(chunkInfo, null, null, true); - } - - static byte[] generateRandomData(int length) { - byte[] bytes = new byte[length]; - RANDOM.nextBytes(bytes); - return bytes; - } - - /** - * A dummy ChunkInputStream to mock read chunk calls to DN. - */ - public class DummyChunkInputStream extends ChunkInputStream { - - // Stores the read chunk data in each readChunk call - private List readByteBuffers = new ArrayList<>(); - - DummyChunkInputStream(ChunkInfo chunkInfo, - BlockID blockId, - XceiverClientSpi xceiverClient, - boolean verifyChecksum) { - super(chunkInfo, blockId, xceiverClient, verifyChecksum); - } - - public DummyChunkInputStream(ChunkInfo chunkInfo, - BlockID blockId, - XceiverClientSpi xceiverClient, - boolean verifyChecksum, - byte[] data) { - super(chunkInfo, blockId, xceiverClient, verifyChecksum); - chunkData = data; - } - - @Override - protected ByteString readChunk(ChunkInfo readChunkInfo) { - ByteString byteString = ByteString.copyFrom(chunkData, - (int) readChunkInfo.getOffset(), - (int) readChunkInfo.getLen()); - readByteBuffers.add(byteString); - return byteString; - } - - @Override - protected void checkOpen() { - // No action needed - } - } - - /** - * Match readData with the chunkData byte-wise. - * @param readData Data read through ChunkInputStream - * @param inputDataStartIndex first index (inclusive) in chunkData to compare - * with read data - * @param length the number of bytes of data to match starting from - * inputDataStartIndex - */ - private void matchWithInputData(byte[] readData, int inputDataStartIndex, - int length) { - for (int i = inputDataStartIndex; i < inputDataStartIndex + length; i++) { - Assert.assertEquals(chunkData[i], readData[i - inputDataStartIndex]); - } - } - - /** - * Seek to a position and verify through getPos(). - */ - private void seekAndVerify(int pos) throws Exception { - chunkStream.seek(pos); - Assert.assertEquals("Current position of buffer does not match with the " + - "seeked position", pos, chunkStream.getPos()); - } - - @Test - public void testFullChunkRead() throws Exception { - byte[] b = new byte[CHUNK_SIZE]; - chunkStream.read(b, 0, CHUNK_SIZE); - - matchWithInputData(b, 0, CHUNK_SIZE); - } - - @Test - public void testPartialChunkRead() throws Exception { - int len = CHUNK_SIZE / 2; - byte[] b = new byte[len]; - - chunkStream.read(b, 0, len); - - matchWithInputData(b, 0, len); - - // To read chunk data from index 0 to 49 (len = 50), we need to read - // chunk from offset 0 to 60 as the checksum boundary is at every 20 - // bytes. Verify that 60 bytes of chunk data are read and stored in the - // buffers. 
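    // A minimal sketch of that boundary rounding (illustrative only, not
    // ChunkInputStream's actual code), with BYTES_PER_CHECKSUM = 20:
    //   bufferStart = (readStart / 20) * 20               = 0
    //   bufferEnd   = ceil((readStart + len) / 20.0) * 20  = 60
    // so a 50-byte read starting at index 0 pulls indices 0..59 (60 bytes)
    // into the buffers.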
- matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(), - 0, 60); - - } - - @Test - public void testSeek() throws Exception { - seekAndVerify(0); - - try { - seekAndVerify(CHUNK_SIZE); - Assert.fail("Seeking to Chunk Length should fail."); - } catch (EOFException e) { - GenericTestUtils.assertExceptionContains("EOF encountered at pos: " - + CHUNK_SIZE + " for chunk: " + CHUNK_NAME, e); - } - - // Seek before read should update the ChunkInputStream#chunkPosition - seekAndVerify(25); - Assert.assertEquals(25, chunkStream.getChunkPosition()); - - // Read from the seeked position. - // Reading from index 25 to 54 should result in the ChunkInputStream - // copying chunk data from index 20 to 59 into the buffers (checksum - // boundaries). - byte[] b = new byte[30]; - chunkStream.read(b, 0, 30); - matchWithInputData(b, 25, 30); - matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(), - 20, 40); - - // After read, the position of the chunkStream is evaluated from the - // buffers and the chunkPosition should be reset to -1. - Assert.assertEquals(-1, chunkStream.getChunkPosition()); - - // Seek to a position within the current buffers. Current buffers contain - // data from index 20 to 59. ChunkPosition should still not be used to - // set the position. - seekAndVerify(35); - Assert.assertEquals(-1, chunkStream.getChunkPosition()); - - // Seek to a position outside the current buffers. In this case, the - // chunkPosition should be updated to the seeked position. - seekAndVerify(75); - Assert.assertEquals(75, chunkStream.getChunkPosition()); - } - - @Test - public void testSeekAndRead() throws Exception { - // Seek to a position and read data - seekAndVerify(50); - byte[] b1 = new byte[20]; - chunkStream.read(b1, 0, 20); - matchWithInputData(b1, 50, 20); - - // Next read should start from the position of the last read + 1 i.e. 70 - byte[] b2 = new byte[20]; - chunkStream.read(b2, 0, 20); - matchWithInputData(b2, 70, 20); - } -} \ No newline at end of file diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java deleted file mode 100644 index abdd04ea967d8..0000000000000 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * This package contains Ozone InputStream related tests. - */ -package org.apache.hadoop.hdds.scm.storage; \ No newline at end of file diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index 4441b69d8683e..0000000000000 --- a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml deleted file mode 100644 index 9af807f8b9eb0..0000000000000 --- a/hadoop-hdds/common/pom.xml +++ /dev/null @@ -1,285 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - hadoop-hdds-common - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Common - Apache Hadoop HDDS Common - jar - - - 0.5.0-SNAPSHOT - 2.11.0 - 3.4.2 - ${hdds.version} - - - - - org.apache.hadoop - hadoop-hdds-config - - - - javax.annotation - javax.annotation-api - 1.2 - - - - org.fusesource.leveldbjni - leveldbjni-all - - - - ratis-server - org.apache.ratis - - - org.slf4j - slf4j-log4j12 - - - io.dropwizard.metrics - metrics-core - - - org.bouncycastle - bcprov-jdk15on - - - - - ratis-netty - org.apache.ratis - - - ratis-grpc - org.apache.ratis - - - com.google.errorprone - error_prone_annotations - 2.2.0 - true - - - - org.rocksdb - rocksdbjni - 6.0.1 - - - org.apache.hadoop - hadoop-common - test - test-jar - - - - org.apache.logging.log4j - log4j-api - ${log4j2.version} - - - org.apache.logging.log4j - log4j-core - ${log4j2.version} - - - com.lmax - disruptor - ${disruptor.version} - - - org.apache.commons - commons-pool2 - 2.6.0 - - - org.bouncycastle - bcpkix-jdk15on - ${bouncycastle.version} - - - - commons-validator - commons-validator - 1.6 - - - org.junit.jupiter - junit-jupiter-api - - - io.jaegertracing - jaeger-client - ${jaeger.version} - - - io.opentracing - opentracing-util - 0.31.0 - - - org.yaml - snakeyaml - 1.16 - - - - - - - ${basedir}/src/main/resources - - hdds-version-info.properties - - false - - - ${basedir}/src/main/resources - - hdds-version-info.properties - - true - - - - - kr.motd.maven - os-maven-plugin - ${os-maven-plugin.version} - - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - ${protobuf-maven-plugin.version} - true - - - com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier} - - ${basedir}/src/main/proto/ - - DatanodeContainerProtocol.proto - - target/generated-sources/java - false - - - - compile-protoc - - compile - test-compile - compile-custom - test-compile-custom - - - grpc-java - - io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} - - - - - - - maven-antrun-plugin - - - generate-sources - - - - - - - - - - run - - - - - - org.apache.hadoop - hadoop-maven-plugins - - - version-info - generate-resources - - version-info - - - - ${basedir}/../ - - */src/main/java/**/*.java - */src/main/proto/*.proto - - - - - - compile-protoc - - protoc - - - ${protobuf.version} - ${protoc.path} - - ${basedir}/src/main/proto - - - ${basedir}/src/main/proto - - StorageContainerLocationProtocol.proto - hdds.proto - ScmBlockLocationProtocol.proto - 
SCMSecurityProtocol.proto - - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - - diff --git a/hadoop-hdds/common/src/main/bin/hadoop-config.cmd b/hadoop-hdds/common/src/main/bin/hadoop-config.cmd deleted file mode 100644 index d77dc5346a1fc..0000000000000 --- a/hadoop-hdds/common/src/main/bin/hadoop-config.cmd +++ /dev/null @@ -1,317 +0,0 @@ -@echo off -@rem Licensed to the Apache Software Foundation (ASF) under one or more -@rem contributor license agreements. See the NOTICE file distributed with -@rem this work for additional information regarding copyright ownership. -@rem The ASF licenses this file to You under the Apache License, Version 2.0 -@rem (the "License"); you may not use this file except in compliance with -@rem the License. You may obtain a copy of the License at -@rem -@rem http://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. - -@rem included in all the hadoop scripts with source command -@rem should not be executable directly -@rem also should not be passed any arguments, since we need original %* - -if not defined HADOOP_COMMON_DIR ( - set HADOOP_COMMON_DIR=share\hadoop\common -) -if not defined HADOOP_COMMON_LIB_JARS_DIR ( - set HADOOP_COMMON_LIB_JARS_DIR=share\hadoop\common\lib -) -if not defined HADOOP_COMMON_LIB_NATIVE_DIR ( - set HADOOP_COMMON_LIB_NATIVE_DIR=lib\native -) -if not defined HDFS_DIR ( - set HDFS_DIR=share\hadoop\hdfs -) -if not defined HDFS_LIB_JARS_DIR ( - set HDFS_LIB_JARS_DIR=share\hadoop\hdfs\lib -) -if not defined YARN_DIR ( - set YARN_DIR=share\hadoop\yarn -) -if not defined YARN_LIB_JARS_DIR ( - set YARN_LIB_JARS_DIR=share\hadoop\yarn\lib -) -if not defined MAPRED_DIR ( - set MAPRED_DIR=share\hadoop\mapreduce -) -if not defined MAPRED_LIB_JARS_DIR ( - set MAPRED_LIB_JARS_DIR=share\hadoop\mapreduce\lib -) - -@rem the root of the Hadoop installation -set HADOOP_HOME=%~dp0 -for %%i in (%HADOOP_HOME%.) do ( - set HADOOP_HOME=%%~dpi -) -if "%HADOOP_HOME:~-1%" == "\" ( - set HADOOP_HOME=%HADOOP_HOME:~0,-1% -) - -if not exist %HADOOP_HOME%\share\hadoop\common\hadoop-common-*.jar ( - @echo +================================================================+ - @echo ^| Error: HADOOP_HOME is not set correctly ^| - @echo +----------------------------------------------------------------+ - @echo ^| Please set your HADOOP_HOME variable to the absolute path of ^| - @echo ^| the directory that contains the hadoop distribution ^| - @echo +================================================================+ - exit /b 1 -) - -if not defined HADOOP_CONF_DIR ( - set HADOOP_CONF_DIR=%HADOOP_HOME%\etc\hadoop -) - -@rem -@rem Allow alternate conf dir location. -@rem - -if "%1" == "--config" ( - set HADOOP_CONF_DIR=%2 - shift - shift -) - -@rem -@rem check to see it is specified whether to use the workers or the -@rem masters file -@rem - -if "%1" == "--hosts" ( - set HADOOP_WORKERS=%HADOOP_CONF_DIR%\%2 - shift - shift -) - -@rem -@rem Set log level. Default to INFO. 
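@rem For example (DEBUG is just an illustrative value), a caller that passes
@rem "--loglevel DEBUG" sets HADOOP_LOGLEVEL=DEBUG below, and HADOOP_ROOT_LOGGER
@rem then defaults to DEBUG,console further down.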
-@rem - -if "%1" == "--loglevel" ( - set HADOOP_LOGLEVEL=%2 - shift - shift -) - -if exist %HADOOP_CONF_DIR%\hadoop-env.cmd ( - call %HADOOP_CONF_DIR%\hadoop-env.cmd -) - -@rem -@rem setup java environment variables -@rem - -if not defined JAVA_HOME ( - echo Error: JAVA_HOME is not set. - goto :eof -) - -if not exist %JAVA_HOME%\bin\java.exe ( - echo Error: JAVA_HOME is incorrectly set. - echo Please update %HADOOP_CONF_DIR%\hadoop-env.cmd - goto :eof -) - -set JAVA=%JAVA_HOME%\bin\java -@rem some Java parameters -set JAVA_HEAP_MAX=-Xmx1000m - -@rem -@rem check envvars which might override default args -@rem - -if defined HADOOP_HEAPSIZE ( - set JAVA_HEAP_MAX=-Xmx%HADOOP_HEAPSIZE%m -) - -@rem -@rem CLASSPATH initially contains %HADOOP_CONF_DIR% -@rem - -set CLASSPATH=%HADOOP_CONF_DIR% - -if not defined HADOOP_COMMON_HOME ( - if exist %HADOOP_HOME%\share\hadoop\common ( - set HADOOP_COMMON_HOME=%HADOOP_HOME% - ) -) - -@rem -@rem for releases, add core hadoop jar & webapps to CLASSPATH -@rem - -if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\webapps ( - set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR% -) - -if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR% ( - set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR%\* -) - -set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\* - -@rem -@rem default log directory % file -@rem - -if not defined HADOOP_LOG_DIR ( - set HADOOP_LOG_DIR=%HADOOP_HOME%\logs -) - -if not defined HADOOP_LOGFILE ( - set HADOOP_LOGFILE=hadoop.log -) - -if not defined HADOOP_LOGLEVEL ( - set HADOOP_LOGLEVEL=INFO -) - -if not defined HADOOP_ROOT_LOGGER ( - set HADOOP_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console -) - -@rem -@rem default policy file for service-level authorization -@rem - -if not defined HADOOP_POLICYFILE ( - set HADOOP_POLICYFILE=hadoop-policy.xml -) - -@rem -@rem Determine the JAVA_PLATFORM -@rem - -for /f "delims=" %%A in ('%JAVA% -Xmx32m %HADOOP_JAVA_PLATFORM_OPTS% -classpath "%CLASSPATH%" org.apache.hadoop.util.PlatformName') do set JAVA_PLATFORM=%%A -@rem replace space with underscore -set JAVA_PLATFORM=%JAVA_PLATFORM: =_% - -@rem -@rem setup 'java.library.path' for native hadoop code if necessary -@rem - -@rem Check if we're running hadoop directly from the build -if exist %HADOOP_COMMON_HOME%\target\bin ( - if defined JAVA_LIBRARY_PATH ( - set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_COMMON_HOME%\target\bin - ) else ( - set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\target\bin - ) -) - -@rem For the distro case, check the bin folder -if exist %HADOOP_COMMON_HOME%\bin ( - if defined JAVA_LIBRARY_PATH ( - set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_COMMON_HOME%\bin - ) else ( - set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\bin - ) -) - -@rem -@rem setup a default TOOL_PATH -@rem -set TOOL_PATH=%HADOOP_HOME%\share\hadoop\tools\lib\* - -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.dir=%HADOOP_LOG_DIR% -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.file=%HADOOP_LOGFILE% -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.home.dir=%HADOOP_HOME% -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.id.str=%HADOOP_IDENT_STRING% -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.root.logger=%HADOOP_ROOT_LOGGER% - -if defined JAVA_LIBRARY_PATH ( - set HADOOP_OPTS=%HADOOP_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH% -) -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.policy.file=%HADOOP_POLICYFILE% - -@rem -@rem Disable ipv6 as it can cause issues -@rem - -set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true - -@rem -@rem put hdfs in 
classpath if present -@rem - -if not defined HADOOP_HDFS_HOME ( - if exist %HADOOP_HOME%\%HDFS_DIR% ( - set HADOOP_HDFS_HOME=%HADOOP_HOME% - ) -) - -if exist %HADOOP_HDFS_HOME%\%HDFS_DIR%\webapps ( - set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR% -) - -if exist %HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR% ( - set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR%\* -) - -set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%\* - -@rem -@rem put yarn in classpath if present -@rem - -if not defined HADOOP_YARN_HOME ( - if exist %HADOOP_HOME%\%YARN_DIR% ( - set HADOOP_YARN_HOME=%HADOOP_HOME% - ) -) - -if exist %HADOOP_YARN_HOME%\%YARN_DIR%\webapps ( - set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR% -) - -if exist %HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR% ( - set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\* -) - -set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%\* - -@rem -@rem put mapred in classpath if present AND different from YARN -@rem - -if not defined HADOOP_MAPRED_HOME ( - if exist %HADOOP_HOME%\%MAPRED_DIR% ( - set HADOOP_MAPRED_HOME=%HADOOP_HOME% - ) -) - -if not "%HADOOP_MAPRED_HOME%\%MAPRED_DIR%" == "%HADOOP_YARN_HOME%\%YARN_DIR%" ( - - if exist %HADOOP_MAPRED_HOME%\%MAPRED_DIR%\webapps ( - set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR% - ) - - if exist %HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR% ( - set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\* - ) - - set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%\* -) - -@rem -@rem add user-specified CLASSPATH last -@rem - -if defined HADOOP_CLASSPATH ( - if not defined HADOOP_USE_CLIENT_CLASSLOADER ( - if defined HADOOP_USER_CLASSPATH_FIRST ( - set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%; - ) else ( - set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%; - ) - ) -) - -:eof diff --git a/hadoop-hdds/common/src/main/bin/hadoop-config.sh b/hadoop-hdds/common/src/main/bin/hadoop-config.sh deleted file mode 100755 index 444b79a362953..0000000000000 --- a/hadoop-hdds/common/src/main/bin/hadoop-config.sh +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#### -# IMPORTANT -#### - -## The hadoop-config.sh tends to get executed by non-Hadoop scripts. -## Those parts expect this script to parse/manipulate $@. In order -## to maintain backward compatibility, this means a surprising -## lack of functions for bits that would be much better off in -## a function. -## -## In other words, yes, there is some bad things happen here and -## unless we break the rest of the ecosystem, we can't change it. 
:( - - -# included in all the hadoop scripts with source command -# should not be executable directly -# also should not be passed any arguments, since we need original $* -# -# after doing more config, caller should also exec finalize -# function to finish last minute/default configs for -# settings that might be different between daemons & interactive - -# you must be this high to ride the ride -if [[ -z "${BASH_VERSINFO[0]}" ]] \ - || [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \ - || [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then - echo "bash v3.2+ is required. Sorry." - exit 1 -fi - -# In order to get partially bootstrapped, we need to figure out where -# we are located. Chances are good that our caller has already done -# this work for us, but just in case... - -if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then - _hadoop_common_this="${BASH_SOURCE-$0}" - HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hadoop_common_this}")" >/dev/null && pwd -P) -fi - -# get our functions defined for usage later -if [[ -n "${HADOOP_COMMON_HOME}" ]] && - [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" ]]; then - # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh - . "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" -elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then - # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh - . "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" -else - echo "ERROR: Unable to exec ${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh." 1>&2 - exit 1 -fi - -hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME - -# allow overrides of the above and pre-defines of the below -if [[ -n "${HADOOP_COMMON_HOME}" ]] && - [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then - # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example - . "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" -elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then - # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example - . "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" -fi - -# -# IMPORTANT! We are not executing user provided code yet! -# - -# Let's go! Base definitions so we can move forward -hadoop_bootstrap - -# let's find our conf. -# -# first, check and process params passed to us -# we process this in-line so that we can directly modify $@ -# if something downstream is processing that directly, -# we need to make sure our params have been ripped out -# note that we do many of them here for various utilities. -# this provides consistency and forces a more consistent -# user experience - - -# save these off in case our caller needs them -# shellcheck disable=SC2034 -HADOOP_USER_PARAMS=("$@") - -hadoop_parse_args "$@" -shift "${HADOOP_PARSE_COUNTER}" - -# -# Setup the base-line environment -# -hadoop_find_confdir -hadoop_exec_hadoopenv -hadoop_import_shellprofiles -hadoop_exec_userfuncs - -# -# IMPORTANT! User provided code is now available! -# - -hadoop_exec_user_hadoopenv -hadoop_verify_confdir - -hadoop_deprecate_envvar HADOOP_SLAVES HADOOP_WORKERS -hadoop_deprecate_envvar HADOOP_SLAVE_NAMES HADOOP_WORKER_NAMES -hadoop_deprecate_envvar HADOOP_SLAVE_SLEEP HADOOP_WORKER_SLEEP - -# do all the OS-specific startup bits here -# this allows us to get a decent JAVA_HOME, -# call crle for LD_LIBRARY_PATH, etc. -hadoop_os_tricks - -hadoop_java_setup - -hadoop_basic_init - -# inject any sub-project overrides, defaults, etc. 
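# (A note on the pattern below: a shellprofile or hadoop-env.sh sourced above
#  may define hadoop_subproject_init; `declare -F` succeeds only when such a
#  function already exists, so the guarded call is a no-op otherwise.)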
-if declare -F hadoop_subproject_init >/dev/null ; then - hadoop_subproject_init -fi - -hadoop_shellprofiles_init - -# get the native libs in there pretty quick -hadoop_add_javalibpath "${HADOOP_HOME}/build/native" -hadoop_add_javalibpath "${HADOOP_HOME}/${HADOOP_COMMON_LIB_NATIVE_DIR}" - -hadoop_shellprofiles_nativelib - -# get the basic java class path for these subprojects -# in as quickly as possible since other stuff -# will definitely depend upon it. - -hadoop_add_common_to_classpath -hadoop_shellprofiles_classpath - -# user API commands can now be run since the runtime -# environment has been configured -hadoop_exec_hadooprc - -# -# backwards compatibility. new stuff should -# call this when they are ready -# -if [[ -z "${HADOOP_NEW_CONFIG}" ]]; then - hadoop_finalize -fi diff --git a/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh b/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh deleted file mode 100755 index 55304916ad1f7..0000000000000 --- a/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Run a Hadoop command on all slave hosts. - -function hadoop_usage -{ - echo "Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] (start|stop|status) " -} - -this="${BASH_SOURCE-$0}" -bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) - -# let's locate libexec... -if [[ -n "${HADOOP_HOME}" ]]; then - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" -else - HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" -fi - -HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" -# shellcheck disable=SC2034 -HADOOP_NEW_CONFIG=true -if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then - . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" -else - echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1 - exit 1 -fi - -if [[ $# = 0 ]]; then - hadoop_exit_with_usage 1 -fi - -daemonmode=$1 -shift - -if [[ -z "${HADOOP_HDFS_HOME}" ]]; then - hdfsscript="${HADOOP_HOME}/bin/hdfs" -else - hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs" -fi - -hadoop_error "WARNING: Use of this script to ${daemonmode} HDFS daemons is deprecated." -hadoop_error "WARNING: Attempting to execute replacement \"hdfs --workers --daemon ${daemonmode}\" instead." - -# -# Original input was usually: -# hadoop-daemons.sh (shell options) (start|stop) (datanode|...) 
(daemon options) -# we're going to turn this into -# hdfs --workers --daemon (start|stop) (rest of options) -# -for (( i = 0; i < ${#HADOOP_USER_PARAMS[@]}; i++ )) -do - if [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^start$ ]] || - [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^stop$ ]] || - [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^status$ ]]; then - unset HADOOP_USER_PARAMS[$i] - fi -done - -${hdfsscript} --workers --daemon "${daemonmode}" "${HADOOP_USER_PARAMS[@]}" diff --git a/hadoop-hdds/common/src/main/bin/hadoop-functions.sh b/hadoop-hdds/common/src/main/bin/hadoop-functions.sh deleted file mode 100755 index 484fe2302f9ba..0000000000000 --- a/hadoop-hdds/common/src/main/bin/hadoop-functions.sh +++ /dev/null @@ -1,2732 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# we need to declare this globally as an array, which can only -# be done outside of a function -declare -a HADOOP_SUBCMD_USAGE -declare -a HADOOP_OPTION_USAGE -declare -a HADOOP_SUBCMD_USAGE_TYPES - -## @description Print a message to stderr -## @audience public -## @stability stable -## @replaceable no -## @param string -function hadoop_error -{ - echo "$*" 1>&2 -} - -## @description Print a message to stderr if --debug is turned on -## @audience public -## @stability stable -## @replaceable no -## @param string -function hadoop_debug -{ - if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then - echo "DEBUG: $*" 1>&2 - fi -} - -## @description Given a filename or dir, return the absolute version of it -## @description This works as an alternative to readlink, which isn't -## @description portable. -## @audience public -## @stability stable -## @param fsobj -## @replaceable no -## @return 0 success -## @return 1 failure -## @return stdout abspath -function hadoop_abs -{ - declare obj=$1 - declare dir - declare fn - declare dirret - - if [[ ! -e ${obj} ]]; then - return 1 - elif [[ -d ${obj} ]]; then - dir=${obj} - else - dir=$(dirname -- "${obj}") - fn=$(basename -- "${obj}") - fn="/${fn}" - fi - - dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P) - dirret=$? - if [[ ${dirret} = 0 ]]; then - echo "${dir}${fn}" - return 0 - fi - return 1 -} - -## @description Given variable $1 delete $2 from it -## @audience public -## @stability stable -## @replaceable no -function hadoop_delete_entry -{ - if [[ ${!1} =~ \ ${2}\ ]] ; then - hadoop_debug "Removing ${2} from ${1}" - eval "${1}"=\""${!1// ${2} }"\" - fi -} - -## @description Given variable $1 add $2 to it -## @audience public -## @stability stable -## @replaceable no -function hadoop_add_entry -{ - if [[ ! 
${!1} =~ \ ${2}\ ]] ; then - hadoop_debug "Adding ${2} to ${1}" - #shellcheck disable=SC2140 - eval "${1}"=\""${!1} ${2} "\" - fi -} - -## @description Given variable $1 determine if $2 is in it -## @audience public -## @stability stable -## @replaceable no -## @return 0 = yes, 1 = no -function hadoop_verify_entry -{ - # this unfortunately can't really be tested by bats. :( - # so if this changes, be aware that unit tests effectively - # do this function in them - [[ ${!1} =~ \ ${2}\ ]] -} - -## @description Check if an array has a given value -## @audience public -## @stability stable -## @replaceable yes -## @param element -## @param array -## @returns 0 = yes -## @returns 1 = no -function hadoop_array_contains -{ - declare element=$1 - shift - declare val - - if [[ "$#" -eq 0 ]]; then - return 1 - fi - - for val in "${@}"; do - if [[ "${val}" == "${element}" ]]; then - return 0 - fi - done - return 1 -} - -## @description Add the `appendstring` if `checkstring` is not -## @description present in the given array -## @audience public -## @stability stable -## @replaceable yes -## @param envvar -## @param appendstring -function hadoop_add_array_param -{ - declare arrname=$1 - declare add=$2 - - declare arrref="${arrname}[@]" - declare array=("${!arrref}") - - if ! hadoop_array_contains "${add}" "${array[@]}"; then - #shellcheck disable=SC1083,SC2086 - eval ${arrname}=\(\"\${array[@]}\" \"${add}\" \) - hadoop_debug "$1 accepted $2" - else - hadoop_debug "$1 declined $2" - fi -} - -## @description Sort an array (must not contain regexps) -## @description present in the given array -## @audience public -## @stability stable -## @replaceable yes -## @param arrayvar -function hadoop_sort_array -{ - declare arrname=$1 - declare arrref="${arrname}[@]" - declare array=("${!arrref}") - declare oifs - - declare globstatus - declare -a sa - - globstatus=$(set -o | grep noglob | awk '{print $NF}') - - set -f - oifs=${IFS} - - # shellcheck disable=SC2034 - IFS=$'\n' sa=($(sort <<<"${array[*]}")) - - # shellcheck disable=SC1083 - eval "${arrname}"=\(\"\${sa[@]}\"\) - - IFS=${oifs} - if [[ "${globstatus}" = off ]]; then - set +f - fi -} - -## @description Check if we are running with priv -## @description by default, this implementation looks for -## @description EUID=0. For OSes that have true priv -## @description separation, this should be something more complex -## @audience private -## @stability evolving -## @replaceable yes -## @return 1 = no priv -## @return 0 = priv -function hadoop_privilege_check -{ - [[ "${EUID}" = 0 ]] -} - -## @description Execute a command via su when running as root -## @description if the given user is found or exit with -## @description failure if not. -## @description otherwise just run it. (This is intended to -## @description be used by the start-*/stop-* scripts.) -## @audience private -## @stability evolving -## @replaceable yes -## @param user -## @param commandstring -## @return exitstatus -function hadoop_su -{ - declare user=$1 - shift - - if hadoop_privilege_check; then - if hadoop_verify_user_resolves user; then - su -l "${user}" -- "$@" - else - hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting." - return 1 - fi - else - "$@" - fi -} - -## @description Execute a command via su when running as root -## @description with extra support for commands that might -## @description legitimately start as root (e.g., datanode) -## @description (This is intended to -## @description be used by the start-*/stop-* scripts.) 
-## @audience private -## @stability evolving -## @replaceable no -## @param user -## @param commandstring -## @return exitstatus -function hadoop_uservar_su -{ - - ## startup matrix: - # - # if $EUID != 0, then exec - # if $EUID =0 then - # if hdfs_subcmd_user is defined, call hadoop_su to exec - # if hdfs_subcmd_user is not defined, error - # - # For secure daemons, this means both the secure and insecure env vars need to be - # defined. e.g., HDFS_DATANODE_USER=root HDFS_DATANODE_SECURE_USER=hdfs - # This function will pick up the "normal" var, switch to that user, then - # execute the command which will then pick up the "secure" version. - # - - declare program=$1 - declare command=$2 - shift 2 - - declare uprogram - declare ucommand - declare uvar - declare svar - - if hadoop_privilege_check; then - uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER) - - svar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER) - - if [[ -n "${!uvar}" ]]; then - hadoop_su "${!uvar}" "$@" - elif [[ -n "${!svar}" ]]; then - ## if we are here, then SECURE_USER with no USER defined - ## we are already privileged, so just run the command and hope - ## for the best - "$@" - else - hadoop_error "ERROR: Attempting to operate on ${program} ${command} as root" - hadoop_error "ERROR: but there is no ${uvar} defined. Aborting operation." - return 1 - fi - else - "$@" - fi -} - -## @description Add a subcommand to the usage output -## @audience private -## @stability evolving -## @replaceable no -## @param subcommand -## @param subcommandtype -## @param subcommanddesc -function hadoop_add_subcommand -{ - declare subcmd=$1 - declare subtype=$2 - declare text=$3 - - hadoop_debug "${subcmd} as a ${subtype}" - - hadoop_add_array_param HADOOP_SUBCMD_USAGE_TYPES "${subtype}" - - # done in this order so that sort works later - HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${subtype}@${text}" - ((HADOOP_SUBCMD_USAGE_COUNTER=HADOOP_SUBCMD_USAGE_COUNTER+1)) -} - -## @description Add an option to the usage output -## @audience private -## @stability evolving -## @replaceable no -## @param subcommand -## @param subcommanddesc -function hadoop_add_option -{ - local option=$1 - local text=$2 - - HADOOP_OPTION_USAGE[${HADOOP_OPTION_USAGE_COUNTER}]="${option}@${text}" - ((HADOOP_OPTION_USAGE_COUNTER=HADOOP_OPTION_USAGE_COUNTER+1)) -} - -## @description Reset the usage information to blank -## @audience private -## @stability evolving -## @replaceable no -function hadoop_reset_usage -{ - HADOOP_SUBCMD_USAGE=() - HADOOP_OPTION_USAGE=() - HADOOP_SUBCMD_USAGE_TYPES=() - HADOOP_SUBCMD_USAGE_COUNTER=0 - HADOOP_OPTION_USAGE_COUNTER=0 -} - -## @description Print a screen-size aware two-column output -## @description if reqtype is not null, only print those requested -## @audience private -## @stability evolving -## @replaceable no -## @param reqtype -## @param array -function hadoop_generic_columnprinter -{ - declare reqtype=$1 - shift - declare -a input=("$@") - declare -i i=0 - declare -i counter=0 - declare line - declare text - declare option - declare giventext - declare -i maxoptsize - declare -i foldsize - declare -a tmpa - declare numcols - declare brup - - if [[ -n "${COLUMNS}" ]]; then - numcols=${COLUMNS} - else - numcols=$(tput cols) 2>/dev/null - COLUMNS=${numcols} - fi - - if [[ -z "${numcols}" - || ! 
"${numcols}" =~ ^[0-9]+$ ]]; then - numcols=75 - else - ((numcols=numcols-5)) - fi - - while read -r line; do - tmpa[${counter}]=${line} - ((counter=counter+1)) - IFS='@' read -ra brup <<< "${line}" - option="${brup[0]}" - if [[ ${#option} -gt ${maxoptsize} ]]; then - maxoptsize=${#option} - fi - done < <(for text in "${input[@]}"; do - echo "${text}" - done | sort) - - i=0 - ((foldsize=numcols-maxoptsize)) - - until [[ $i -eq ${#tmpa[@]} ]]; do - IFS='@' read -ra brup <<< "${tmpa[$i]}" - - option="${brup[0]}" - cmdtype="${brup[1]}" - giventext="${brup[2]}" - - if [[ -n "${reqtype}" ]]; then - if [[ "${cmdtype}" != "${reqtype}" ]]; then - ((i=i+1)) - continue - fi - fi - - if [[ -z "${giventext}" ]]; then - giventext=${cmdtype} - fi - - while read -r line; do - printf "%-${maxoptsize}s %-s\n" "${option}" "${line}" - option=" " - done < <(echo "${giventext}"| fold -s -w ${foldsize}) - ((i=i+1)) - done -} - -## @description generate standard usage output -## @description and optionally takes a class -## @audience private -## @stability evolving -## @replaceable no -## @param execname -## @param true|false -## @param [text to use in place of SUBCOMMAND] -function hadoop_generate_usage -{ - declare cmd=$1 - declare takesclass=$2 - declare subcmdtext=${3:-"SUBCOMMAND"} - declare haveoptions - declare optstring - declare havesubs - declare subcmdstring - declare cmdtype - - cmd=${cmd##*/} - - if [[ -n "${HADOOP_OPTION_USAGE_COUNTER}" - && "${HADOOP_OPTION_USAGE_COUNTER}" -gt 0 ]]; then - haveoptions=true - optstring=" [OPTIONS]" - fi - - if [[ -n "${HADOOP_SUBCMD_USAGE_COUNTER}" - && "${HADOOP_SUBCMD_USAGE_COUNTER}" -gt 0 ]]; then - havesubs=true - subcmdstring=" ${subcmdtext} [${subcmdtext} OPTIONS]" - fi - - echo "Usage: ${cmd}${optstring}${subcmdstring}" - if [[ ${takesclass} = true ]]; then - echo " or ${cmd}${optstring} CLASSNAME [CLASSNAME OPTIONS]" - echo " where CLASSNAME is a user-provided Java class" - fi - - if [[ "${haveoptions}" = true ]]; then - echo "" - echo " OPTIONS is none or any of:" - echo "" - - hadoop_generic_columnprinter "" "${HADOOP_OPTION_USAGE[@]}" - fi - - if [[ "${havesubs}" = true ]]; then - echo "" - echo " ${subcmdtext} is one of:" - echo "" - - if [[ "${#HADOOP_SUBCMD_USAGE_TYPES[@]}" -gt 0 ]]; then - - hadoop_sort_array HADOOP_SUBCMD_USAGE_TYPES - for subtype in "${HADOOP_SUBCMD_USAGE_TYPES[@]}"; do - #shellcheck disable=SC2086 - cmdtype="$(tr '[:lower:]' '[:upper:]' <<< ${subtype:0:1})${subtype:1}" - printf "\n %s Commands:\n\n" "${cmdtype}" - hadoop_generic_columnprinter "${subtype}" "${HADOOP_SUBCMD_USAGE[@]}" - done - else - hadoop_generic_columnprinter "" "${HADOOP_SUBCMD_USAGE[@]}" - fi - echo "" - echo "${subcmdtext} may print help when invoked w/o parameters or with -h." - fi -} - -## @description Replace `oldvar` with `newvar` if `oldvar` exists. -## @audience public -## @stability stable -## @replaceable yes -## @param oldvar -## @param newvar -function hadoop_deprecate_envvar -{ - local oldvar=$1 - local newvar=$2 - local oldval=${!oldvar} - local newval=${!newvar} - - if [[ -n "${oldval}" ]]; then - hadoop_error "WARNING: ${oldvar} has been replaced by ${newvar}. Using value of ${oldvar}." - # shellcheck disable=SC2086 - eval ${newvar}=\"${oldval}\" - - # shellcheck disable=SC2086 - newval=${oldval} - - # shellcheck disable=SC2086 - eval ${newvar}=\"${newval}\" - fi -} - -## @description Declare `var` being used and print its value. 
-## @audience public -## @stability stable -## @replaceable yes -## @param var -function hadoop_using_envvar -{ - local var=$1 - local val=${!var} - - if [[ -n "${val}" ]]; then - hadoop_debug "${var} = ${val}" - fi -} - -## @description Create the directory 'dir'. -## @audience public -## @stability stable -## @replaceable yes -## @param dir -function hadoop_mkdir -{ - local dir=$1 - - if [[ ! -w "${dir}" ]] && [[ ! -d "${dir}" ]]; then - hadoop_error "WARNING: ${dir} does not exist. Creating." - if ! mkdir -p "${dir}"; then - hadoop_error "ERROR: Unable to create ${dir}. Aborting." - exit 1 - fi - fi -} - -## @description Bootstraps the Hadoop shell environment -## @audience private -## @stability evolving -## @replaceable no -function hadoop_bootstrap -{ - # the root of the Hadoop installation - # See HADOOP-6255 for the expected directory structure layout - - if [[ -n "${DEFAULT_LIBEXEC_DIR}" ]]; then - hadoop_error "WARNING: DEFAULT_LIBEXEC_DIR ignored. It has been replaced by HADOOP_DEFAULT_LIBEXEC_DIR." - fi - - # By now, HADOOP_LIBEXEC_DIR should have been defined upstream - # We can piggyback off of that to figure out where the default - # HADOOP_FREFIX should be. This allows us to run without - # HADOOP_HOME ever being defined by a human! As a consequence - # HADOOP_LIBEXEC_DIR now becomes perhaps the single most powerful - # env var within Hadoop. - if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then - hadoop_error "HADOOP_LIBEXEC_DIR is not defined. Exiting." - exit 1 - fi - HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P) - HADOOP_HOME=${HADOOP_HOME:-$HADOOP_DEFAULT_PREFIX} - export HADOOP_HOME - - # - # short-cuts. vendors may redefine these as well, preferably - # in hadoop-layout.sh - # - HADOOP_COMMON_DIR=${HADOOP_COMMON_DIR:-"share/hadoop/common"} - HADOOP_COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR:-"share/hadoop/common/lib"} - HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_COMMON_LIB_NATIVE_DIR:-"lib/native"} - HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"} - HDFS_LIB_JARS_DIR=${HDFS_LIB_JARS_DIR:-"share/hadoop/hdfs/lib"} - YARN_DIR=${YARN_DIR:-"share/hadoop/yarn"} - YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"} - MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"} - MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"} - HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"} - HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"} - OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"} - OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"} - OZONEFS_DIR=${OZONEFS_DIR:-"share/hadoop/ozonefs"} - - HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}} - HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"} - HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"} - - # by default, whatever we are about to run doesn't support - # daemonization - HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false - - # by default, we have not been self-re-execed - HADOOP_REEXECED_CMD=false - - HADOOP_SUBCMD_SECURESERVICE=false - - # This is the default we claim in hadoop-env.sh - JSVC_HOME=${JSVC_HOME:-"/usr/bin"} - - # usage output set to zero - hadoop_reset_usage - - export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} - - # defaults - export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"} - hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}" -} - -## @description Locate Hadoop's configuration directory -## @audience private -## @stability evolving -## @replaceable no -function hadoop_find_confdir -{ - 
local conf_dir - - # An attempt at compatibility with some Hadoop 1.x - # installs. - if [[ -e "${HADOOP_HOME}/conf/hadoop-env.sh" ]]; then - conf_dir="conf" - else - conf_dir="etc/hadoop" - fi - export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_HOME}/${conf_dir}}" - - hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}" -} - -## @description Validate ${HADOOP_CONF_DIR} -## @audience public -## @stability stable -## @replaceable yes -## @return will exit on failure conditions -function hadoop_verify_confdir -{ - # Check only log4j.properties by default. - # --loglevel does not work without logger settings in log4j.log4j.properties. - if [[ ! -f "${HADOOP_CONF_DIR}/log4j.properties" ]]; then - hadoop_error "WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete." - fi -} - -## @description Import the hadoop-env.sh settings -## @audience private -## @stability evolving -## @replaceable no -function hadoop_exec_hadoopenv -{ - if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then - if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then - export HADOOP_ENV_PROCESSED=true - # shellcheck source=./hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh - . "${HADOOP_CONF_DIR}/hadoop-env.sh" - fi - fi -} - -## @description Import the replaced functions -## @audience private -## @stability evolving -## @replaceable no -function hadoop_exec_userfuncs -{ - if [[ -e "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" ]]; then - # shellcheck disable=SC1090 - . "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" - fi -} - -## @description Read the user's settings. This provides for users to -## @description override and/or append hadoop-env.sh. It is not meant -## @description as a complete system override. -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_exec_user_hadoopenv -{ - if [[ -f "${HOME}/.hadoop-env" ]]; then - hadoop_debug "Applying the user's .hadoop-env" - # shellcheck disable=SC1090 - . "${HOME}/.hadoop-env" - fi -} - -## @description Read the user's settings. This provides for users to -## @description run Hadoop Shell API after system bootstrap -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_exec_hadooprc -{ - if [[ -f "${HOME}/.hadooprc" ]]; then - hadoop_debug "Applying the user's .hadooprc" - # shellcheck disable=SC1090 - . "${HOME}/.hadooprc" - fi -} - -## @description Import shellprofile.d content -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_import_shellprofiles -{ - local i - local files1 - local files2 - - if [[ -d "${HADOOP_LIBEXEC_DIR}/shellprofile.d" ]]; then - files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*.sh) - hadoop_debug "shellprofiles: ${files1[*]}" - else - hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work." - fi - - if [[ -d "${HADOOP_CONF_DIR}/shellprofile.d" ]]; then - files2=(${HADOOP_CONF_DIR}/shellprofile.d/*.sh) - fi - - # enable bundled shellprofiles that come - # from hadoop-tools. This converts the user-facing HADOOP_OPTIONAL_TOOLS - # to the HADOOP_TOOLS_OPTIONS that the shell profiles expect. - # See dist-tools-hooks-maker for how the example HADOOP_OPTIONAL_TOOLS - # gets populated into hadoop-env.sh - - for i in ${HADOOP_OPTIONAL_TOOLS//,/ }; do - hadoop_add_entry HADOOP_TOOLS_OPTIONS "${i}" - done - - for i in "${files1[@]}" "${files2[@]}" - do - if [[ -n "${i}" - && -f "${i}" ]]; then - hadoop_debug "Profiles: importing ${i}" - # shellcheck disable=SC1090 - . 
"${i}" - fi - done -} - -## @description Initialize the registered shell profiles -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_shellprofiles_init -{ - local i - - for i in ${HADOOP_SHELL_PROFILES} - do - if declare -F _${i}_hadoop_init >/dev/null ; then - hadoop_debug "Profiles: ${i} init" - # shellcheck disable=SC2086 - _${i}_hadoop_init - fi - done -} - -## @description Apply the shell profile classpath additions -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_shellprofiles_classpath -{ - local i - - for i in ${HADOOP_SHELL_PROFILES} - do - if declare -F _${i}_hadoop_classpath >/dev/null ; then - hadoop_debug "Profiles: ${i} classpath" - # shellcheck disable=SC2086 - _${i}_hadoop_classpath - fi - done -} - -## @description Apply the shell profile native library additions -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_shellprofiles_nativelib -{ - local i - - for i in ${HADOOP_SHELL_PROFILES} - do - if declare -F _${i}_hadoop_nativelib >/dev/null ; then - hadoop_debug "Profiles: ${i} nativelib" - # shellcheck disable=SC2086 - _${i}_hadoop_nativelib - fi - done -} - -## @description Apply the shell profile final configuration -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_shellprofiles_finalize -{ - local i - - for i in ${HADOOP_SHELL_PROFILES} - do - if declare -F _${i}_hadoop_finalize >/dev/null ; then - hadoop_debug "Profiles: ${i} finalize" - # shellcheck disable=SC2086 - _${i}_hadoop_finalize - fi - done -} - -## @description Initialize the Hadoop shell environment, now that -## @description user settings have been imported -## @audience private -## @stability evolving -## @replaceable no -function hadoop_basic_init -{ - # Some of these are also set in hadoop-env.sh. - # we still set them here just in case hadoop-env.sh is - # broken in some way, set up defaults, etc. - # - # but it is important to note that if you update these - # you also need to update hadoop-env.sh as well!!! - - CLASSPATH="" - hadoop_debug "Initialize CLASSPATH" - - if [[ -z "${HADOOP_COMMON_HOME}" ]] && - [[ -d "${HADOOP_HOME}/${HADOOP_COMMON_DIR}" ]]; then - export HADOOP_COMMON_HOME="${HADOOP_HOME}" - fi - - # default policy file for service-level authorization - HADOOP_POLICYFILE=${HADOOP_POLICYFILE:-"hadoop-policy.xml"} - - # define HADOOP_HDFS_HOME - if [[ -z "${HADOOP_HDFS_HOME}" ]] && - [[ -d "${HADOOP_HOME}/${HDFS_DIR}" ]]; then - export HADOOP_HDFS_HOME="${HADOOP_HOME}" - fi - - # define HADOOP_YARN_HOME - if [[ -z "${HADOOP_YARN_HOME}" ]] && - [[ -d "${HADOOP_HOME}/${YARN_DIR}" ]]; then - export HADOOP_YARN_HOME="${HADOOP_HOME}" - fi - - # define HADOOP_MAPRED_HOME - if [[ -z "${HADOOP_MAPRED_HOME}" ]] && - [[ -d "${HADOOP_HOME}/${MAPRED_DIR}" ]]; then - export HADOOP_MAPRED_HOME="${HADOOP_HOME}" - fi - - if [[ ! -d "${HADOOP_COMMON_HOME}" ]]; then - hadoop_error "ERROR: Invalid HADOOP_COMMON_HOME" - exit 1 - fi - - if [[ ! -d "${HADOOP_HDFS_HOME}" ]]; then - hadoop_error "ERROR: Invalid HADOOP_HDFS_HOME" - exit 1 - fi - - if [[ ! -d "${HADOOP_YARN_HOME}" ]]; then - hadoop_error "ERROR: Invalid HADOOP_YARN_HOME" - exit 1 - fi - - if [[ ! 
-d "${HADOOP_MAPRED_HOME}" ]]; then - hadoop_error "ERROR: Invalid HADOOP_MAPRED_HOME" - exit 1 - fi - - # if for some reason the shell doesn't have $USER defined - # (e.g., ssh'd in to execute a command) - # let's get the effective username and use that - USER=${USER:-$(id -nu)} - HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER} - HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_HOME}/logs"} - HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log} - HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO} - HADOOP_NICENESS=${HADOOP_NICENESS:-0} - HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5} - HADOOP_PID_DIR=${HADOOP_PID_DIR:-/tmp} - HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console} - HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-${HADOOP_LOGLEVEL},RFA} - HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender} - HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"} - HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}} - HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}} - HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10} -} - -## @description Set the worker support information to the contents -## @description of `filename` -## @audience public -## @stability stable -## @replaceable no -## @param filename -## @return will exit if file does not exist -function hadoop_populate_workers_file -{ - local workersfile=$1 - shift - if [[ -f "${workersfile}" ]]; then - HADOOP_WORKERS="${workersfile}" - elif [[ -f "${HADOOP_CONF_DIR}/${workersfile}" ]]; then - HADOOP_WORKERS="${HADOOP_CONF_DIR}/${workersfile}" - else - hadoop_error "ERROR: Cannot find hosts file \"${workersfile}\"" - hadoop_exit_with_usage 1 - fi -} - -## @description Rotates the given `file` until `number` of -## @description files exist. -## @audience public -## @stability stable -## @replaceable no -## @param filename -## @param [number] -## @return $? will contain last mv's return value -function hadoop_rotate_log -{ - # - # Users are likely to replace this one for something - # that gzips or uses dates or who knows what. - # - # be aware that &1 and &2 might go through here - # so don't do anything too crazy... - # - local log=$1; - local num=${2:-5}; - - if [[ -f "${log}" ]]; then # rotate logs - while [[ ${num} -gt 1 ]]; do - #shellcheck disable=SC2086 - let prev=${num}-1 - if [[ -f "${log}.${prev}" ]]; then - mv "${log}.${prev}" "${log}.${num}" - fi - num=${prev} - done - mv "${log}" "${log}.${num}" - fi -} - -## @description Via ssh, log into `hostname` and run `command` -## @audience private -## @stability evolving -## @replaceable yes -## @param hostname -## @param command -## @param [...] -function hadoop_actual_ssh -{ - # we are passing this function to xargs - # should get hostname followed by rest of command line - local worker=$1 - shift - - # shellcheck disable=SC2086 - ssh ${HADOOP_SSH_OPTS} ${worker} $"${@// /\\ }" 2>&1 | sed "s/^/$worker: /" -} - -## @description Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES} -## @description and execute command. -## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param [...] 
-function hadoop_connect_to_hosts -{ - # shellcheck disable=SC2124 - local params="$@" - local worker_file - local tmpslvnames - - # - # ssh (or whatever) to a host - # - # User can specify hostnames or a file where the hostnames are (not both) - if [[ -n "${HADOOP_WORKERS}" && -n "${HADOOP_WORKER_NAMES}" ]] ; then - hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting." - exit 1 - elif [[ -z "${HADOOP_WORKER_NAMES}" ]]; then - if [[ -n "${HADOOP_WORKERS}" ]]; then - worker_file=${HADOOP_WORKERS} - elif [[ -f "${HADOOP_CONF_DIR}/workers" ]]; then - worker_file=${HADOOP_CONF_DIR}/workers - elif [[ -f "${HADOOP_CONF_DIR}/slaves" ]]; then - hadoop_error "WARNING: 'slaves' file has been deprecated. Please use 'workers' file instead." - worker_file=${HADOOP_CONF_DIR}/slaves - fi - fi - - # if pdsh is available, let's use it. otherwise default - # to a loop around ssh. (ugh) - if [[ -e '/usr/bin/pdsh' ]]; then - if [[ -z "${HADOOP_WORKER_NAMES}" ]] ; then - # if we were given a file, just let pdsh deal with it. - # shellcheck disable=SC2086 - PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \ - -f "${HADOOP_SSH_PARALLEL}" -w ^"${worker_file}" $"${@// /\\ }" 2>&1 - else - # no spaces allowed in the pdsh arg host list - # shellcheck disable=SC2086 - tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,) - PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \ - -f "${HADOOP_SSH_PARALLEL}" \ - -w "${tmpslvnames}" $"${@// /\\ }" 2>&1 - fi - else - if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then - HADOOP_WORKER_NAMES=$(sed 's/#.*$//;/^$/d' "${worker_file}") - fi - hadoop_connect_to_hosts_without_pdsh "${params}" - fi -} - -## @description Connect to ${HADOOP_WORKER_NAMES} and execute command -## @description under the environment which does not support pdsh. -## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param [...] -function hadoop_connect_to_hosts_without_pdsh -{ - # shellcheck disable=SC2124 - local params="$@" - local workers=(${HADOOP_WORKER_NAMES}) - for (( i = 0; i < ${#workers[@]}; i++ )) - do - if (( i != 0 && i % HADOOP_SSH_PARALLEL == 0 )); then - wait - fi - # shellcheck disable=SC2086 - hadoop_actual_ssh "${workers[$i]}" ${params} & - done - wait -} - -## @description Utility routine to handle --workers mode -## @audience private -## @stability evolving -## @replaceable yes -## @param commandarray -function hadoop_common_worker_mode_execute -{ - # - # input should be the command line as given by the user - # in the form of an array - # - local argv=("$@") - - # if --workers is still on the command line, remove it - # to prevent loops - # Also remove --hostnames and --hosts along with arg values - local argsSize=${#argv[@]}; - for (( i = 0; i < argsSize; i++ )) - do - if [[ "${argv[$i]}" =~ ^--workers$ ]]; then - unset argv[$i] - elif [[ "${argv[$i]}" =~ ^--hostnames$ ]] || - [[ "${argv[$i]}" =~ ^--hosts$ ]]; then - unset argv[$i]; - let i++; - unset argv[$i]; - fi - done - if [[ ${QATESTMODE} = true ]]; then - echo "${argv[@]}" - return - fi - hadoop_connect_to_hosts -- "${argv[@]}" -} - -## @description Verify that a shell command was passed a valid -## @description class name -## @audience public -## @stability stable -## @replaceable yes -## @param classname -## @return 0 = success -## @return 1 = failure w/user message -function hadoop_validate_classname -{ - local class=$1 - shift 1 - - if [[ ! ${class} =~ \. ]]; then - # assuming the arg is typo of command if it does not conatain ".". 
- # class belonging to no package is not allowed as a result. - hadoop_error "ERROR: ${class} is not COMMAND nor fully qualified CLASSNAME." - return 1 - fi - return 0 -} - -## @description Append the `appendstring` if `checkstring` is not -## @description present in the given `envvar` -## @audience public -## @stability stable -## @replaceable yes -## @param envvar -## @param checkstring -## @param appendstring -function hadoop_add_param -{ - # - # general param dedupe.. - # $1 is what we are adding to - # $2 is the name of what we want to add (key) - # $3 is the key+value of what we're adding - # - # doing it this way allows us to support all sorts of - # different syntaxes, just so long as they are space - # delimited - # - if [[ ! ${!1} =~ $2 ]] ; then - #shellcheck disable=SC2140 - eval "$1"="'${!1} $3'" - if [[ ${!1:0:1} = ' ' ]]; then - #shellcheck disable=SC2140 - eval "$1"="'${!1# }'" - fi - hadoop_debug "$1 accepted $3" - else - hadoop_debug "$1 declined $3" - fi -} - -## @description Register the given `shellprofile` to the Hadoop -## @description shell subsystem -## @audience public -## @stability stable -## @replaceable yes -## @param shellprofile -function hadoop_add_profile -{ - # shellcheck disable=SC2086 - hadoop_add_param HADOOP_SHELL_PROFILES $1 $1 -} - -## @description Add a file system object (directory, file, -## @description wildcard, ...) to the classpath. Optionally provide -## @description a hint as to where in the classpath it should go. -## @audience public -## @stability stable -## @replaceable yes -## @param object -## @param [before|after] -## @return 0 = success (added or duplicate) -## @return 1 = failure (doesn't exist or some other reason) -function hadoop_add_classpath -{ - # However, with classpath (& JLP), we can do dedupe - # along with some sanity checking (e.g., missing directories) - # since we have a better idea of what is legal - # - # for wildcard at end, we can - # at least check the dir exists - if [[ $1 =~ ^.*\*$ ]]; then - local mp - mp=$(dirname "$1") - if [[ ! -d "${mp}" ]]; then - hadoop_debug "Rejected CLASSPATH: $1 (not a dir)" - return 1 - fi - - # no wildcard in the middle, so check existence - # (doesn't matter *what* it is) - elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then - hadoop_debug "Rejected CLASSPATH: $1 (does not exist)" - return 1 - fi - if [[ -z "${CLASSPATH}" ]]; then - CLASSPATH=$1 - hadoop_debug "Initial CLASSPATH=$1" - elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then - if [[ "$2" = "before" ]]; then - CLASSPATH="$1:${CLASSPATH}" - hadoop_debug "Prepend CLASSPATH: $1" - else - CLASSPATH+=:$1 - hadoop_debug "Append CLASSPATH: $1" - fi - else - hadoop_debug "Dupe CLASSPATH: $1" - fi - return 0 -} - -## @description Add a file system object (directory, file, -## @description wildcard, ...) to the colonpath. Optionally provide -## @description a hint as to where in the colonpath it should go. -## @description Prior to adding, objects are checked for duplication -## @description and check for existence. Many other functions use -## @description this function as their base implementation -## @description including `hadoop_add_javalibpath` and `hadoop_add_ldlibpath`. 
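# Editorial example, not original content: hadoop_add_param above only appends
# when the key is missing from the target variable, so repeated calls stay
# idempotent. Starting from an empty HADOOP_OPTS:

HADOOP_OPTS=""
hadoop_add_param HADOOP_OPTS java.net.preferIPv4Stack "-Djava.net.preferIPv4Stack=true"
hadoop_add_param HADOOP_OPTS java.net.preferIPv4Stack "-Djava.net.preferIPv4Stack=true"
echo "${HADOOP_OPTS}"   # the flag appears exactly once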
-## @audience public -## @stability stable -## @replaceable yes -## @param envvar -## @param object -## @param [before|after] -## @return 0 = success (added or duplicate) -## @return 1 = failure (doesn't exist or some other reason) -function hadoop_add_colonpath -{ - # this is CLASSPATH, JLP, etc but with dedupe but no - # other checking - if [[ -d "${2}" ]] && [[ ":${!1}:" != *":$2:"* ]]; then - if [[ -z "${!1}" ]]; then - # shellcheck disable=SC2086 - eval $1="'$2'" - hadoop_debug "Initial colonpath($1): $2" - elif [[ "$3" = "before" ]]; then - # shellcheck disable=SC2086 - eval $1="'$2:${!1}'" - hadoop_debug "Prepend colonpath($1): $2" - else - # shellcheck disable=SC2086 - eval $1+=":'$2'" - hadoop_debug "Append colonpath($1): $2" - fi - return 0 - fi - hadoop_debug "Rejected colonpath($1): $2" - return 1 -} - -## @description Add a file system object (directory, file, -## @description wildcard, ...) to the Java JNI path. Optionally -## @description provide a hint as to where in the Java JNI path -## @description it should go. -## @audience public -## @stability stable -## @replaceable yes -## @param object -## @param [before|after] -## @return 0 = success (added or duplicate) -## @return 1 = failure (doesn't exist or some other reason) -function hadoop_add_javalibpath -{ - # specialized function for a common use case - hadoop_add_colonpath JAVA_LIBRARY_PATH "$1" "$2" -} - -## @description Add a file system object (directory, file, -## @description wildcard, ...) to the LD_LIBRARY_PATH. Optionally -## @description provide a hint as to where in the LD_LIBRARY_PATH -## @description it should go. -## @audience public -## @stability stable -## @replaceable yes -## @param object -## @param [before|after] -## @return 0 = success (added or duplicate) -## @return 1 = failure (doesn't exist or some other reason) -function hadoop_add_ldlibpath -{ - local status - # specialized function for a common use case - hadoop_add_colonpath LD_LIBRARY_PATH "$1" "$2" - status=$? - - # note that we export this - export LD_LIBRARY_PATH - return ${status} -} - -## @description Add the common/core Hadoop components to the -## @description environment -## @audience private -## @stability evolving -## @replaceable yes -## @returns 1 on failure, may exit -## @returns 0 on success -function hadoop_add_common_to_classpath -{ - # - # get all of the common jars+config in the path - # - - if [[ -z "${HADOOP_COMMON_HOME}" - || -z "${HADOOP_COMMON_DIR}" - || -z "${HADOOP_COMMON_LIB_JARS_DIR}" ]]; then - hadoop_debug "COMMON_HOME=${HADOOP_COMMON_HOME}" - hadoop_debug "COMMON_DIR=${HADOOP_COMMON_DIR}" - hadoop_debug "COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR}" - hadoop_error "ERROR: HADOOP_COMMON_HOME or related vars are not configured." - exit 1 - fi - - # developers - if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then - hadoop_add_classpath "${HADOOP_COMMON_HOME}/hadoop-common/target/classes" - fi - - hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*' - hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*' -} - -## @description Run libexec/tools/module.sh to add to the classpath -## @description environment -## @audience private -## @stability evolving -## @replaceable yes -## @param module -function hadoop_add_to_classpath_tools -{ - declare module=$1 - - if [[ -f "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh" ]]; then - # shellcheck disable=SC1090 - . 
"${HADOOP_LIBEXEC_DIR}/tools/${module}.sh" - else - hadoop_error "ERROR: Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found." - fi - - if declare -f hadoop_classpath_tools_${module} >/dev/null 2>&1; then - "hadoop_classpath_tools_${module}" - fi -} - -## @description Add the user's custom classpath settings to the -## @description environment -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_add_to_classpath_userpath -{ - # Add the user-specified HADOOP_CLASSPATH to the - # official CLASSPATH env var if HADOOP_USE_CLIENT_CLASSLOADER - # is not set. - # Add it first or last depending on if user has - # set env-var HADOOP_USER_CLASSPATH_FIRST - # we'll also dedupe it, because we're cool like that. - # - declare -a array - declare -i c=0 - declare -i j - declare -i i - declare idx - - if [[ -n "${HADOOP_CLASSPATH}" ]]; then - # I wonder if Java runs on VMS. - for idx in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do - array[${c}]=${idx} - ((c=c+1)) - done - - # bats gets confused by j getting set to 0 - ((j=c-1)) || ${QATESTMODE} - - if [[ -z "${HADOOP_USE_CLIENT_CLASSLOADER}" ]]; then - if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then - for ((i=0; i<=j; i++)); do - hadoop_add_classpath "${array[$i]}" after - done - else - for ((i=j; i>=0; i--)); do - hadoop_add_classpath "${array[$i]}" before - done - fi - fi - fi -} - -## @description Routine to configure any OS-specific settings. -## @audience public -## @stability stable -## @replaceable yes -## @return may exit on failure conditions -function hadoop_os_tricks -{ - local bindv6only - - HADOOP_IS_CYGWIN=false - case ${HADOOP_OS_TYPE} in - Darwin) - if [[ -z "${JAVA_HOME}" ]]; then - if [[ -x /usr/libexec/java_home ]]; then - JAVA_HOME="$(/usr/libexec/java_home)" - export JAVA_HOME - else - JAVA_HOME=/Library/Java/Home - export JAVA_HOME - fi - fi - ;; - Linux) - - # Newer versions of glibc use an arena memory allocator that - # causes virtual # memory usage to explode. This interacts badly - # with the many threads that we use in Hadoop. Tune the variable - # down to prevent vmem explosion. - export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4} - # we put this in QA test mode off so that non-Linux can test - if [[ "${QATESTMODE}" = true ]]; then - return - fi - - # NOTE! HADOOP_ALLOW_IPV6 is a developer hook. We leave it - # undocumented in hadoop-env.sh because we don't want users to - # shoot themselves in the foot while devs make IPv6 work. - - bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null) - - if [[ -n "${bindv6only}" ]] && - [[ "${bindv6only}" -eq "1" ]] && - [[ "${HADOOP_ALLOW_IPV6}" != "yes" ]]; then - hadoop_error "ERROR: \"net.ipv6.bindv6only\" is set to 1 " - hadoop_error "ERROR: Hadoop networking could be broken. Aborting." - hadoop_error "ERROR: For more info: http://wiki.apache.org/hadoop/HadoopIPv6" - exit 1 - fi - ;; - CYGWIN*) - # Flag that we're running on Cygwin to trigger path translation later. - HADOOP_IS_CYGWIN=true - ;; - esac -} - -## @description Configure/verify ${JAVA_HOME} -## @audience public -## @stability stable -## @replaceable yes -## @return may exit on failure conditions -function hadoop_java_setup -{ - # Bail if we did not detect it - if [[ -z "${JAVA_HOME}" ]]; then - hadoop_error "ERROR: JAVA_HOME is not set and could not be found." - exit 1 - fi - - if [[ ! -d "${JAVA_HOME}" ]]; then - hadoop_error "ERROR: JAVA_HOME ${JAVA_HOME} does not exist." - exit 1 - fi - - JAVA="${JAVA_HOME}/bin/java" - - if [[ ! 
-x "$JAVA" ]]; then - hadoop_error "ERROR: $JAVA is not executable." - exit 1 - fi -} - -## @description Finish Java JNI paths prior to execution -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_finalize_libpaths -{ - if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then - hadoop_translate_cygwin_path JAVA_LIBRARY_PATH - hadoop_add_param HADOOP_OPTS java.library.path \ - "-Djava.library.path=${JAVA_LIBRARY_PATH}" - export LD_LIBRARY_PATH - fi -} - -## @description Finish Java heap parameters prior to execution -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_finalize_hadoop_heap -{ - if [[ -n "${HADOOP_HEAPSIZE_MAX}" ]]; then - if [[ "${HADOOP_HEAPSIZE_MAX}" =~ ^[0-9]+$ ]]; then - HADOOP_HEAPSIZE_MAX="${HADOOP_HEAPSIZE_MAX}m" - fi - hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE_MAX}" - fi - - # backwards compatibility - if [[ -n "${HADOOP_HEAPSIZE}" ]]; then - if [[ "${HADOOP_HEAPSIZE}" =~ ^[0-9]+$ ]]; then - HADOOP_HEAPSIZE="${HADOOP_HEAPSIZE}m" - fi - hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE}" - fi - - if [[ -n "${HADOOP_HEAPSIZE_MIN}" ]]; then - if [[ "${HADOOP_HEAPSIZE_MIN}" =~ ^[0-9]+$ ]]; then - HADOOP_HEAPSIZE_MIN="${HADOOP_HEAPSIZE_MIN}m" - fi - hadoop_add_param HADOOP_OPTS Xms "-Xms${HADOOP_HEAPSIZE_MIN}" - fi -} - -## @description Converts the contents of the variable name -## @description `varnameref` into the equivalent Windows path. -## @description If the second parameter is true, then `varnameref` -## @description is treated as though it was a path list. -## @audience public -## @stability stable -## @replaceable yes -## @param varnameref -## @param [true] -function hadoop_translate_cygwin_path -{ - if [[ "${HADOOP_IS_CYGWIN}" = "true" ]]; then - if [[ "$2" = "true" ]]; then - #shellcheck disable=SC2016 - eval "$1"='$(cygpath -p -w "${!1}" 2>/dev/null)' - else - #shellcheck disable=SC2016 - eval "$1"='$(cygpath -w "${!1}" 2>/dev/null)' - fi - fi -} - -## @description Adds the HADOOP_CLIENT_OPTS variable to -## @description HADOOP_OPTS if HADOOP_SUBCMD_SUPPORTDAEMONIZATION is false -## @audience public -## @stability stable -## @replaceable yes -function hadoop_add_client_opts -{ - if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = false - || -z "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" ]]; then - hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" - HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" - fi -} - -## @description Finish configuring Hadoop specific system properties -## @description prior to executing Java -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_finalize_hadoop_opts -{ - hadoop_translate_cygwin_path HADOOP_LOG_DIR - hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}" - hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}" - hadoop_translate_cygwin_path HADOOP_HOME - export HADOOP_HOME - hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}" - hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING}" - hadoop_add_param HADOOP_OPTS hadoop.root.logger "-Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}" - hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE}" - hadoop_add_param HADOOP_OPTS hadoop.security.logger "-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}" -} - -## @description Finish Java classpath prior to execution -## @audience private -## @stability evolving -## @replaceable yes -function 
hadoop_finalize_classpath -{ - hadoop_add_classpath "${HADOOP_CONF_DIR}" before - - # user classpath gets added at the last minute. this allows - # override of CONF dirs and more - hadoop_add_to_classpath_userpath - hadoop_translate_cygwin_path CLASSPATH true -} - -## @description Finish all the remaining environment settings prior -## @description to executing Java. This is a wrapper that calls -## @description the other `finalize` routines. -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_finalize -{ - hadoop_shellprofiles_finalize - - hadoop_finalize_classpath - hadoop_finalize_libpaths - hadoop_finalize_hadoop_heap - hadoop_finalize_hadoop_opts - - hadoop_translate_cygwin_path HADOOP_HOME - hadoop_translate_cygwin_path HADOOP_CONF_DIR - hadoop_translate_cygwin_path HADOOP_COMMON_HOME - hadoop_translate_cygwin_path HADOOP_HDFS_HOME - hadoop_translate_cygwin_path HADOOP_YARN_HOME - hadoop_translate_cygwin_path HADOOP_MAPRED_HOME -} - -## @description Print usage information and exit with the passed -## @description `exitcode` -## @audience public -## @stability stable -## @replaceable no -## @param exitcode -## @return This function will always exit. -function hadoop_exit_with_usage -{ - local exitcode=$1 - if [[ -z $exitcode ]]; then - exitcode=1 - fi - # shellcheck disable=SC2034 - if declare -F hadoop_usage >/dev/null ; then - hadoop_usage - elif [[ -x /usr/bin/cowsay ]]; then - /usr/bin/cowsay -f elephant "Sorry, no help available." - else - hadoop_error "Sorry, no help available." - fi - exit $exitcode -} - -## @description Verify that prerequisites have been met prior to -## @description excuting a privileged program. -## @audience private -## @stability evolving -## @replaceable yes -## @return This routine may exit. -function hadoop_verify_secure_prereq -{ - # if you are on an OS like Illumos that has functional roles - # and you are using pfexec, you'll probably want to change - # this. - - if ! hadoop_privilege_check && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then - hadoop_error "ERROR: You must be a privileged user in order to run a secure service." - exit 1 - else - return 0 - fi -} - -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_setup_secure_service -{ - # need a more complicated setup? replace me! - - HADOOP_PID_DIR=${HADOOP_SECURE_PID_DIR} - HADOOP_LOG_DIR=${HADOOP_SECURE_LOG_DIR} -} - -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_verify_piddir -{ - if [[ -z "${HADOOP_PID_DIR}" ]]; then - hadoop_error "No pid directory defined." - exit 1 - fi - hadoop_mkdir "${HADOOP_PID_DIR}" - touch "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Unable to write in ${HADOOP_PID_DIR}. Aborting." - exit 1 - fi - rm "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1 -} - -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_verify_logdir -{ - if [[ -z "${HADOOP_LOG_DIR}" ]]; then - hadoop_error "No log directory defined." - exit 1 - fi - hadoop_mkdir "${HADOOP_LOG_DIR}" - touch "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Unable to write in ${HADOOP_LOG_DIR}. Aborting." 
- exit 1 - fi - rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1 -} - -## @description Determine the status of the daemon referenced -## @description by `pidfile` -## @audience public -## @stability stable -## @replaceable yes -## @param pidfile -## @return (mostly) LSB 4.1.0 compatible status -function hadoop_status_daemon -{ - # - # LSB 4.1.0 compatible status command (1) - # - # 0 = program is running - # 1 = dead, but still a pid (2) - # 2 = (not used by us) - # 3 = not running - # - # 1 - this is not an endorsement of the LSB - # - # 2 - technically, the specification says /var/run/pid, so - # we should never return this value, but we're giving - # them the benefit of a doubt and returning 1 even if - # our pid is not in in /var/run . - # - - local pidfile=$1 - shift - - local pid - local pspid - - if [[ -f "${pidfile}" ]]; then - pid=$(cat "${pidfile}") - if pspid=$(ps -o args= -p"${pid}" 2>/dev/null); then - # this is to check that the running process we found is actually the same - # daemon that we're interested in - if [[ ${pspid} =~ -Dproc_${daemonname} ]]; then - return 0 - fi - fi - return 1 - fi - return 3 -} - -## @description Execute the Java `class`, passing along any `options`. -## @description Additionally, set the Java property -Dproc_`command`. -## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param class -## @param [options] -function hadoop_java_exec -{ - # run a java command. this is used for - # non-daemons - - local command=$1 - local class=$2 - shift 2 - - hadoop_debug "Final CLASSPATH: ${CLASSPATH}" - hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}" - hadoop_debug "Final JAVA_HOME: ${JAVA_HOME}" - hadoop_debug "java: ${JAVA}" - hadoop_debug "Class name: ${class}" - hadoop_debug "Command line options: $*" - - export CLASSPATH - #shellcheck disable=SC2086 - exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@" -} - -## @description Start a non-privileged daemon in the foreground. -## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param class -## @param pidfile -## @param [options] -function hadoop_start_daemon -{ - # this is our non-privileged daemon starter - # that fires up a daemon in the *foreground* - # so complex! so wow! much java! - local command=$1 - local class=$2 - local pidfile=$3 - shift 3 - - hadoop_debug "Final CLASSPATH: ${CLASSPATH}" - hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}" - hadoop_debug "Final JAVA_HOME: ${JAVA_HOME}" - hadoop_debug "java: ${JAVA}" - hadoop_debug "Class name: ${class}" - hadoop_debug "Command line options: $*" - - # this is for the non-daemon pid creation - #shellcheck disable=SC2086 - echo $$ > "${pidfile}" 2>/dev/null - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot write ${command} pid ${pidfile}." - fi - - export CLASSPATH - #shellcheck disable=SC2086 - exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@" -} - -## @description Start a non-privileged daemon in the background. 
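# A hypothetical caller (editor's illustration, not from the patch):
# hadoop_status_daemon above returns LSB-style codes, so scripts can branch on
# the result. Assuming a pid file named like /tmp/hadoop-hdfs-namenode.pid:

hadoop_status_daemon "/tmp/hadoop-hdfs-namenode.pid"
case $? in
  0) echo "daemon is running" ;;
  1) echo "pid file exists but the process is dead" ;;
  3) echo "daemon is not running" ;;
esac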
-## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param class -## @param pidfile -## @param outfile -## @param [options] -function hadoop_start_daemon_wrapper -{ - local daemonname=$1 - local class=$2 - local pidfile=$3 - local outfile=$4 - shift 4 - - local counter - - hadoop_rotate_log "${outfile}" - - hadoop_start_daemon "${daemonname}" \ - "$class" \ - "${pidfile}" \ - "$@" >> "${outfile}" 2>&1 < /dev/null & - - # we need to avoid a race condition here - # so let's wait for the fork to finish - # before overriding with the daemonized pid - (( counter=0 )) - while [[ ! -f ${pidfile} && ${counter} -le 5 ]]; do - sleep 1 - (( counter++ )) - done - - # this is for daemon pid creation - #shellcheck disable=SC2086 - echo $! > "${pidfile}" 2>/dev/null - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot write ${daemonname} pid ${pidfile}." - fi - - # shellcheck disable=SC2086 - renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!" - fi - - # shellcheck disable=SC2086 - disown %+ >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!" - fi - sleep 1 - - # capture the ulimit output - ulimit -a >> "${outfile}" 2>&1 - - # shellcheck disable=SC2086 - if ! ps -p $! >/dev/null 2>&1; then - return 1 - fi - return 0 -} - -## @description Start a privileged daemon in the foreground. -## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param class -## @param daemonpidfile -## @param daemonoutfile -## @param daemonerrfile -## @param wrapperpidfile -## @param [options] -function hadoop_start_secure_daemon -{ - # this is used to launch a secure daemon in the *foreground* - # - local daemonname=$1 - local class=$2 - - # pid file to create for our daemon - local daemonpidfile=$3 - - # where to send stdout. jsvc has bad habits so this *may* be &1 - # which means you send it to stdout! - local daemonoutfile=$4 - - # where to send stderr. same thing, except &2 = stderr - local daemonerrfile=$5 - local privpidfile=$6 - shift 6 - - hadoop_rotate_log "${daemonoutfile}" - hadoop_rotate_log "${daemonerrfile}" - - # shellcheck disable=SC2153 - jsvc="${JSVC_HOME}/jsvc" - if [[ ! -f "${jsvc}" ]]; then - hadoop_error "JSVC_HOME is not set or set incorrectly. jsvc is required to run secure" - hadoop_error "or privileged daemons. Please download and install jsvc from " - hadoop_error "http://archive.apache.org/dist/commons/daemon/binaries/ " - hadoop_error "and set JSVC_HOME to the directory containing the jsvc binary." - exit 1 - fi - - # note that shellcheck will throw a - # bogus for-our-use-case 2086 here. - # it doesn't properly support multi-line situations - - hadoop_debug "Final CLASSPATH: ${CLASSPATH}" - hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}" - hadoop_debug "Final JSVC_HOME: ${JSVC_HOME}" - hadoop_debug "jsvc: ${jsvc}" - hadoop_debug "Final HADOOP_DAEMON_JSVC_EXTRA_OPTS: ${HADOOP_DAEMON_JSVC_EXTRA_OPTS}" - hadoop_debug "Class name: ${class}" - hadoop_debug "Command line options: $*" - - #shellcheck disable=SC2086 - echo $$ > "${privpidfile}" 2>/dev/null - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot write ${daemonname} pid ${privpidfile}." 
- fi - - # shellcheck disable=SC2086 - exec "${jsvc}" \ - "-Dproc_${daemonname}" \ - ${HADOOP_DAEMON_JSVC_EXTRA_OPTS} \ - -outfile "${daemonoutfile}" \ - -errfile "${daemonerrfile}" \ - -pidfile "${daemonpidfile}" \ - -nodetach \ - -user "${HADOOP_SECURE_USER}" \ - -cp "${CLASSPATH}" \ - ${HADOOP_OPTS} \ - "${class}" "$@" -} - -## @description Start a privileged daemon in the background. -## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param class -## @param daemonpidfile -## @param daemonoutfile -## @param wrapperpidfile -## @param warpperoutfile -## @param daemonerrfile -## @param [options] -function hadoop_start_secure_daemon_wrapper -{ - # this wraps hadoop_start_secure_daemon to take care - # of the dirty work to launch a daemon in the background! - local daemonname=$1 - local class=$2 - - # same rules as hadoop_start_secure_daemon except we - # have some additional parameters - - local daemonpidfile=$3 - - local daemonoutfile=$4 - - # the pid file of the subprocess that spawned our - # secure launcher - local jsvcpidfile=$5 - - # the output of the subprocess that spawned our secure - # launcher - local jsvcoutfile=$6 - - local daemonerrfile=$7 - shift 7 - - local counter - - hadoop_rotate_log "${jsvcoutfile}" - - hadoop_start_secure_daemon \ - "${daemonname}" \ - "${class}" \ - "${daemonpidfile}" \ - "${daemonoutfile}" \ - "${daemonerrfile}" \ - "${jsvcpidfile}" "$@" >> "${jsvcoutfile}" 2>&1 < /dev/null & - - # we need to avoid a race condition here - # so let's wait for the fork to finish - # before overriding with the daemonized pid - (( counter=0 )) - while [[ ! -f ${daemonpidfile} && ${counter} -le 5 ]]; do - sleep 1 - (( counter++ )) - done - - #shellcheck disable=SC2086 - if ! echo $! > "${jsvcpidfile}"; then - hadoop_error "ERROR: Cannot write ${daemonname} pid ${jsvcpidfile}." - fi - - sleep 1 - #shellcheck disable=SC2086 - renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!" - fi - if [[ -f "${daemonpidfile}" ]]; then - #shellcheck disable=SC2046 - renice "${HADOOP_NICENESS}" $(cat "${daemonpidfile}" 2>/dev/null) >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot set priority of ${daemonname} process $(cat "${daemonpidfile}" 2>/dev/null)" - fi - fi - #shellcheck disable=SC2046 - disown %+ >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!" - fi - # capture the ulimit output - su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1 - #shellcheck disable=SC2086 - if ! ps -p $! >/dev/null 2>&1; then - return 1 - fi - return 0 -} - -## @description Wait till process dies or till timeout -## @audience private -## @stability evolving -## @param pid -## @param timeout -function wait_process_to_die_or_timeout -{ - local pid=$1 - local timeout=$2 - - # Normalize timeout - # Round up or down - timeout=$(printf "%.0f\n" "${timeout}") - if [[ ${timeout} -lt 1 ]]; then - # minimum 1 second - timeout=1 - fi - - # Wait to see if it's still alive - for (( i=0; i < "${timeout}"; i++ )) - do - if kill -0 "${pid}" > /dev/null 2>&1; then - sleep 1 - else - break - fi - done -} - -## @description Stop the non-privileged `command` daemon with that -## @description that is running at `pidfile`. 
-## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param pidfile -function hadoop_stop_daemon -{ - local cmd=$1 - local pidfile=$2 - shift 2 - - local pid - local cur_pid - - if [[ -f "${pidfile}" ]]; then - pid=$(cat "$pidfile") - - kill "${pid}" >/dev/null 2>&1 - - wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}" - - if kill -0 "${pid}" > /dev/null 2>&1; then - hadoop_error "WARNING: ${cmd} did not stop gracefully after ${HADOOP_STOP_TIMEOUT} seconds: Trying to kill with kill -9" - kill -9 "${pid}" >/dev/null 2>&1 - fi - wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}" - if ps -p "${pid}" > /dev/null 2>&1; then - hadoop_error "ERROR: Unable to kill ${pid}" - else - cur_pid=$(cat "$pidfile") - if [[ "${pid}" = "${cur_pid}" ]]; then - rm -f "${pidfile}" >/dev/null 2>&1 - else - hadoop_error "WARNING: pid has changed for ${cmd}, skip deleting pid file" - fi - fi - fi -} - -## @description Stop the privileged `command` daemon with that -## @description that is running at `daemonpidfile` and launched with -## @description the wrapper at `wrapperpidfile`. -## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param daemonpidfile -## @param wrapperpidfile -function hadoop_stop_secure_daemon -{ - local command=$1 - local daemonpidfile=$2 - local privpidfile=$3 - shift 3 - local ret - - local daemon_pid - local priv_pid - local cur_daemon_pid - local cur_priv_pid - - daemon_pid=$(cat "$daemonpidfile") - priv_pid=$(cat "$privpidfile") - - hadoop_stop_daemon "${command}" "${daemonpidfile}" - ret=$? - - cur_daemon_pid=$(cat "$daemonpidfile") - cur_priv_pid=$(cat "$privpidfile") - - if [[ "${daemon_pid}" = "${cur_daemon_pid}" ]]; then - rm -f "${daemonpidfile}" >/dev/null 2>&1 - else - hadoop_error "WARNING: daemon pid has changed for ${command}, skip deleting daemon pid file" - fi - - if [[ "${priv_pid}" = "${cur_priv_pid}" ]]; then - rm -f "${privpidfile}" >/dev/null 2>&1 - else - hadoop_error "WARNING: priv pid has changed for ${command}, skip deleting priv pid file" - fi - return ${ret} -} - -## @description Manage a non-privileged daemon. -## @audience private -## @stability evolving -## @replaceable yes -## @param [start|stop|status|default] -## @param command -## @param class -## @param daemonpidfile -## @param daemonoutfile -## @param [options] -function hadoop_daemon_handler -{ - local daemonmode=$1 - local daemonname=$2 - local class=$3 - local daemon_pidfile=$4 - local daemon_outfile=$5 - shift 5 - - case ${daemonmode} in - status) - hadoop_status_daemon "${daemon_pidfile}" - exit $? - ;; - - stop) - hadoop_stop_daemon "${daemonname}" "${daemon_pidfile}" - exit $? - ;; - - ##COMPAT -- older hadoops would also start daemons by default - start|default) - hadoop_verify_piddir - hadoop_verify_logdir - hadoop_status_daemon "${daemon_pidfile}" - if [[ $? == 0 ]]; then - hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first." - exit 1 - else - # stale pid file, so just remove it and continue on - rm -f "${daemon_pidfile}" >/dev/null 2>&1 - fi - ##COMPAT - differenticate between --daemon start and nothing - # "nothing" shouldn't detach - if [[ "$daemonmode" = "default" ]]; then - hadoop_start_daemon "${daemonname}" "${class}" "${daemon_pidfile}" "$@" - else - hadoop_start_daemon_wrapper "${daemonname}" \ - "${class}" "${daemon_pidfile}" "${daemon_outfile}" "$@" - fi - ;; - esac -} - -## @description Manage a privileged daemon. 
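# Illustrative only, not part of the deleted script: hadoop_stop_daemon above
# sends a plain kill first, waits HADOOP_STOP_TIMEOUT seconds, and only then
# escalates to kill -9. A more patient shutdown, assuming the default pid
# directory and "hdfs" as the ident string, could be:

HADOOP_STOP_TIMEOUT=30
hadoop_stop_daemon datanode "/tmp/hadoop-hdfs-datanode.pid"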
-## @audience private -## @stability evolving -## @replaceable yes -## @param [start|stop|status|default] -## @param command -## @param class -## @param daemonpidfile -## @param daemonoutfile -## @param wrapperpidfile -## @param wrapperoutfile -## @param wrappererrfile -## @param [options] -function hadoop_secure_daemon_handler -{ - local daemonmode=$1 - local daemonname=$2 - local classname=$3 - local daemon_pidfile=$4 - local daemon_outfile=$5 - local priv_pidfile=$6 - local priv_outfile=$7 - local priv_errfile=$8 - shift 8 - - case ${daemonmode} in - status) - hadoop_status_daemon "${daemon_pidfile}" - exit $? - ;; - - stop) - hadoop_stop_secure_daemon "${daemonname}" \ - "${daemon_pidfile}" "${priv_pidfile}" - exit $? - ;; - - ##COMPAT -- older hadoops would also start daemons by default - start|default) - hadoop_verify_piddir - hadoop_verify_logdir - hadoop_status_daemon "${daemon_pidfile}" - if [[ $? == 0 ]]; then - hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first." - exit 1 - else - # stale pid file, so just remove it and continue on - rm -f "${daemon_pidfile}" >/dev/null 2>&1 - fi - - ##COMPAT - differenticate between --daemon start and nothing - # "nothing" shouldn't detach - if [[ "${daemonmode}" = "default" ]]; then - hadoop_start_secure_daemon "${daemonname}" "${classname}" \ - "${daemon_pidfile}" "${daemon_outfile}" \ - "${priv_errfile}" "${priv_pidfile}" "$@" - else - hadoop_start_secure_daemon_wrapper "${daemonname}" "${classname}" \ - "${daemon_pidfile}" "${daemon_outfile}" \ - "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}" "$@" - fi - ;; - esac -} - -## @description autodetect whether this is a priv subcmd -## @description by whether or not a priv user var exists -## @description and if HADOOP_SECURE_CLASSNAME is defined -## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param subcommand -## @return 1 = not priv -## @return 0 = priv -function hadoop_detect_priv_subcmd -{ - declare program=$1 - declare command=$2 - - if [[ -z "${HADOOP_SECURE_CLASSNAME}" ]]; then - hadoop_debug "No secure classname defined." - return 1 - fi - - uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER) - if [[ -z "${!uvar}" ]]; then - hadoop_debug "No secure user defined." - return 1 - fi - return 0 -} - -## @description Build custom subcommand var -## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param subcommand -## @param customid -## @return string -function hadoop_build_custom_subcmd_var -{ - declare program=$1 - declare command=$2 - declare custom=$3 - declare uprogram - declare ucommand - - if [[ -z "${BASH_VERSINFO[0]}" ]] \ - || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then - uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]') - ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]') - else - uprogram=${program^^} - ucommand=${command^^} - fi - - echo "${uprogram}_${ucommand}_${custom}" -} - -## @description Verify that username in a var converts to user id -## @audience public -## @stability stable -## @replaceable yes -## @param userstring -## @return 0 for success -## @return 1 for failure -function hadoop_verify_user_resolves -{ - declare userstr=$1 - - if [[ -z ${userstr} || -z ${!userstr} ]] ; then - return 1 - fi - - id -u "${!userstr}" >/dev/null 2>&1 -} - -## @description Verify that ${USER} is allowed to execute the -## @description given subcommand. 
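# Editor's example for clarity, not from the patch:
# hadoop_build_custom_subcmd_var above simply upper-cases and joins its
# arguments, which is how the per-subcommand variables used throughout these
# scripts get their names:

hadoop_build_custom_subcmd_var hdfs datanode SECURE_USER   # echoes HDFS_DATANODE_SECURE_USER
hadoop_build_custom_subcmd_var hdfs namenode OPTS          # echoes HDFS_NAMENODE_OPTS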
-## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param subcommand -## @return return 0 on success -## @return exit 1 on failure -function hadoop_verify_user_perm -{ - declare program=$1 - declare command=$2 - declare uvar - - if [[ ${command} =~ \. ]]; then - return 1 - fi - - uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER) - - if [[ -n ${!uvar} ]]; then - if [[ ${!uvar} != "${USER}" ]]; then - hadoop_error "ERROR: ${command} can only be executed by ${!uvar}." - exit 1 - fi - fi - return 0 -} - -## @description Verify that ${USER} is allowed to execute the -## @description given subcommand. -## @audience public -## @stability stable -## @replaceable yes -## @param subcommand -## @return 1 on no re-exec needed -## @return 0 on need to re-exec -function hadoop_need_reexec -{ - declare program=$1 - declare command=$2 - declare uvar - - # we've already been re-execed, bail - - if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then - return 1 - fi - - if [[ ${command} =~ \. ]]; then - return 1 - fi - - # if we have privilege, and the _USER is defined, and _USER is - # set to someone who isn't us, then yes, we should re-exec. - # otherwise no, don't re-exec and let the system deal with it. - - if hadoop_privilege_check; then - uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER) - if [[ -n ${!uvar} ]]; then - if [[ ${!uvar} != "${USER}" ]]; then - return 0 - fi - fi - fi - return 1 -} - -## @description Add custom (program)_(command)_OPTS to HADOOP_OPTS. -## @description Also handles the deprecated cases from pre-3.x. -## @audience public -## @stability evolving -## @replaceable yes -## @param program -## @param subcommand -## @return will exit on failure conditions -function hadoop_subcommand_opts -{ - declare program=$1 - declare command=$2 - declare uvar - declare depvar - declare uprogram - declare ucommand - - if [[ -z "${program}" || -z "${command}" ]]; then - return 1 - fi - - if [[ ${command} =~ \. ]]; then - return 1 - fi - - # bash 4 and up have built-in ways to upper and lower - # case the contents of vars. This is faster than - # calling tr. - - ## We don't call hadoop_build_custom_subcmd_var here - ## since we need to construct this for the deprecation - ## cases. For Hadoop 4.x, this needs to get cleaned up. - - if [[ -z "${BASH_VERSINFO[0]}" ]] \ - || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then - uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]') - ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]') - else - uprogram=${program^^} - ucommand=${command^^} - fi - - uvar="${uprogram}_${ucommand}_OPTS" - - # Let's handle all of the deprecation cases early - # HADOOP_NAMENODE_OPTS -> HDFS_NAMENODE_OPTS - - depvar="HADOOP_${ucommand}_OPTS" - - if [[ "${depvar}" != "${uvar}" ]]; then - if [[ -n "${!depvar}" ]]; then - hadoop_deprecate_envvar "${depvar}" "${uvar}" - fi - fi - - if [[ -n ${!uvar} ]]; then - hadoop_debug "Appending ${uvar} onto HADOOP_OPTS" - HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}" - return 0 - fi -} - -## @description Add custom (program)_(command)_SECURE_EXTRA_OPTS to HADOOP_OPTS. 
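# Editorial sketch, not original content: hadoop_subcommand_opts above prefers
# the per-program variable and, per its own comments, maps the pre-3.x
# HADOOP_xyz_OPTS name across with a deprecation warning when only that one is
# set. With the new-style name:

export HDFS_NAMENODE_OPTS="-Xms1g -Xmx4g"
hadoop_subcommand_opts hdfs namenode
# HADOOP_OPTS now ends with "-Xms1g -Xmx4g"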
-## @description This *does not* handle the pre-3.x deprecated cases -## @audience public -## @stability stable -## @replaceable yes -## @param program -## @param subcommand -## @return will exit on failure conditions -function hadoop_subcommand_secure_opts -{ - declare program=$1 - declare command=$2 - declare uvar - declare uprogram - declare ucommand - - if [[ -z "${program}" || -z "${command}" ]]; then - return 1 - fi - - # HDFS_DATANODE_SECURE_EXTRA_OPTS - # HDFS_NFS3_SECURE_EXTRA_OPTS - # ... - uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_EXTRA_OPTS) - - if [[ -n ${!uvar} ]]; then - hadoop_debug "Appending ${uvar} onto HADOOP_OPTS" - HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}" - return 0 - fi -} - -## @description Perform the 'hadoop classpath', etc subcommand with the given -## @description parameters -## @audience private -## @stability evolving -## @replaceable yes -## @param [parameters] -## @return will print & exit with no params -function hadoop_do_classpath_subcommand -{ - if [[ "$#" -gt 1 ]]; then - eval "$1"=org.apache.hadoop.util.Classpath - else - hadoop_finalize - echo "${CLASSPATH}" - exit 0 - fi -} - -## @description generic shell script option parser. sets -## @description HADOOP_PARSE_COUNTER to set number the -## @description caller should shift -## @audience private -## @stability evolving -## @replaceable yes -## @param [parameters, typically "$@"] -function hadoop_parse_args -{ - HADOOP_DAEMON_MODE="default" - HADOOP_PARSE_COUNTER=0 - - # not all of the options supported here are supported by all commands - # however these are: - hadoop_add_option "--config dir" "Hadoop config directory" - hadoop_add_option "--debug" "turn on shell script debug mode" - hadoop_add_option "--help" "usage information" - - while true; do - hadoop_debug "hadoop_parse_args: processing $1" - case $1 in - --buildpaths) - HADOOP_ENABLE_BUILD_PATHS=true - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1)) - ;; - --config) - shift - confdir=$1 - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2)) - if [[ -d "${confdir}" ]]; then - HADOOP_CONF_DIR="${confdir}" - elif [[ -z "${confdir}" ]]; then - hadoop_error "ERROR: No parameter provided for --config " - hadoop_exit_with_usage 1 - else - hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\"" - hadoop_exit_with_usage 1 - fi - ;; - --daemon) - shift - HADOOP_DAEMON_MODE=$1 - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2)) - if [[ -z "${HADOOP_DAEMON_MODE}" || \ - ! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then - hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"." - hadoop_exit_with_usage 1 - fi - ;; - --debug) - shift - HADOOP_SHELL_SCRIPT_DEBUG=true - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1)) - ;; - --help|-help|-h|help|--h|--\?|-\?|\?) 
- hadoop_exit_with_usage 0 - ;; - --hostnames) - shift - HADOOP_WORKER_NAMES="$1" - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2)) - ;; - --hosts) - shift - hadoop_populate_workers_file "$1" - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2)) - ;; - --loglevel) - shift - # shellcheck disable=SC2034 - HADOOP_LOGLEVEL="$1" - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2)) - ;; - --reexec) - shift - if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then - hadoop_error "ERROR: re-exec fork bomb prevention: --reexec already called" - exit 1 - fi - HADOOP_REEXECED_CMD=true - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1)) - ;; - --workers) - shift - # shellcheck disable=SC2034 - HADOOP_WORKER_MODE=true - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1)) - ;; - *) - break - ;; - esac - done - - hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}" -} - -## @description Handle subcommands from main program entries -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_generic_java_subcmd_handler -{ - declare priv_outfile - declare priv_errfile - declare priv_pidfile - declare daemon_outfile - declare daemon_pidfile - declare secureuser - - # The default/expected way to determine if a daemon is going to run in secure - # mode is defined by hadoop_detect_priv_subcmd. If this returns true - # then setup the secure user var and tell the world we're in secure mode - - if hadoop_detect_priv_subcmd "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"; then - HADOOP_SUBCMD_SECURESERVICE=true - secureuser=$(hadoop_build_custom_subcmd_var "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" SECURE_USER) - - if ! hadoop_verify_user_resolves "${secureuser}"; then - hadoop_error "ERROR: User defined in ${secureuser} (${!secureuser}) does not exist. Aborting." - exit 1 - fi - - HADOOP_SECURE_USER="${!secureuser}" - fi - - # check if we're running in secure mode. - # breaking this up from the above lets 3rd parties - # do things a bit different - # secure services require some extra setup - # if yes, then we need to define all of the priv and daemon stuff - # if not, then we just need to define daemon stuff. - # note the daemon vars are purposefully different between the two - - if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then - - hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" - - hadoop_verify_secure_prereq - hadoop_setup_secure_service - priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out" - priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err" - priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid" - daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out" - daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid" - else - daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out" - daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid" - fi - - # are we actually in daemon mode? - # if yes, use the daemon logger and the appropriate log file. 
- if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then - HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}" - if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then - HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log" - else - HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log" - fi - fi - - # finish defining the environment: system properties, env vars, class paths, etc. - hadoop_finalize - - # do the hard work of launching a daemon or just executing our interactive - # java class - if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = true ]]; then - if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then - hadoop_secure_daemon_handler \ - "${HADOOP_DAEMON_MODE}" \ - "${HADOOP_SUBCMD}" \ - "${HADOOP_SECURE_CLASSNAME}" \ - "${daemon_pidfile}" \ - "${daemon_outfile}" \ - "${priv_pidfile}" \ - "${priv_outfile}" \ - "${priv_errfile}" \ - "${HADOOP_SUBCMD_ARGS[@]}" - else - hadoop_daemon_handler \ - "${HADOOP_DAEMON_MODE}" \ - "${HADOOP_SUBCMD}" \ - "${HADOOP_CLASSNAME}" \ - "${daemon_pidfile}" \ - "${daemon_outfile}" \ - "${HADOOP_SUBCMD_ARGS[@]}" - fi - exit $? - else - hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "${HADOOP_SUBCMD_ARGS[@]}" - fi -} diff --git a/hadoop-hdds/common/src/main/bin/workers.sh b/hadoop-hdds/common/src/main/bin/workers.sh deleted file mode 100755 index 05bc5fd8f0fe0..0000000000000 --- a/hadoop-hdds/common/src/main/bin/workers.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Run a shell command on all worker hosts. -# -# Environment Variables -# -# HADOOP_WORKERS File naming remote hosts. -# Default is ${HADOOP_CONF_DIR}/workers. -# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. -# HADOOP_WORKER_SLEEP Seconds to sleep between spawning remote commands. -# HADOOP_SSH_OPTS Options passed to ssh when running remote commands. -## - -function hadoop_usage -{ - echo "Usage: workers.sh [--config confdir] command..." -} - -# let's locate libexec... -if [[ -n "${HADOOP_HOME}" ]]; then - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" -else - this="${BASH_SOURCE-$0}" - bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) - HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" -fi - -HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" -# shellcheck disable=SC2034 -HADOOP_NEW_CONFIG=true -if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then - . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" -else - echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 
2>&1 - exit 1 -fi - -# if no args specified, show usage -if [[ $# -le 0 ]]; then - hadoop_exit_with_usage 1 -fi - -hadoop_connect_to_hosts "$@" diff --git a/hadoop-hdds/common/src/main/conf/core-site.xml b/hadoop-hdds/common/src/main/conf/core-site.xml deleted file mode 100644 index d2ddf893e49eb..0000000000000 --- a/hadoop-hdds/common/src/main/conf/core-site.xml +++ /dev/null @@ -1,20 +0,0 @@ - - - - - - - - diff --git a/hadoop-hdds/common/src/main/conf/hadoop-env.cmd b/hadoop-hdds/common/src/main/conf/hadoop-env.cmd deleted file mode 100644 index 971869597f529..0000000000000 --- a/hadoop-hdds/common/src/main/conf/hadoop-env.cmd +++ /dev/null @@ -1,90 +0,0 @@ -@echo off -@rem Licensed to the Apache Software Foundation (ASF) under one or more -@rem contributor license agreements. See the NOTICE file distributed with -@rem this work for additional information regarding copyright ownership. -@rem The ASF licenses this file to You under the Apache License, Version 2.0 -@rem (the "License"); you may not use this file except in compliance with -@rem the License. You may obtain a copy of the License at -@rem -@rem http://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. - -@rem Set Hadoop-specific environment variables here. - -@rem The only required environment variable is JAVA_HOME. All others are -@rem optional. When running a distributed configuration it is best to -@rem set JAVA_HOME in this file, so that it is correctly defined on -@rem remote nodes. - -@rem The java implementation to use. Required. -set JAVA_HOME=%JAVA_HOME% - -@rem The jsvc implementation to use. Jsvc is required to run secure datanodes. -@rem set JSVC_HOME=%JSVC_HOME% - -@rem set HADOOP_CONF_DIR= - -@rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. -if exist %HADOOP_HOME%\contrib\capacity-scheduler ( - if not defined HADOOP_CLASSPATH ( - set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar - ) else ( - set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar - ) -) - -@rem The maximum amount of heap to use, in MB. Default is 1000. -@rem set HADOOP_HEAPSIZE= -@rem set HADOOP_NAMENODE_INIT_HEAPSIZE="" - -@rem Extra Java runtime options. Empty by default. 
-@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true - -@rem Command specific options appended to HADOOP_OPTS when specified -if not defined HADOOP_SECURITY_LOGGER ( - set HADOOP_SECURITY_LOGGER=INFO,RFAS -) -if not defined HDFS_AUDIT_LOGGER ( - set HDFS_AUDIT_LOGGER=INFO,NullAppender -) - -set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS% -set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS% -set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS% - -@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc) -set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS% -@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%" - -@rem On secure datanodes, user to run the datanode as after dropping privileges -set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER% - -@rem Where log files are stored. %HADOOP_HOME%/logs by default. -@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME% - -@rem Where log files are stored in the secure data environment. -set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER% - -@rem -@rem Router-based HDFS Federation specific parameters -@rem Specify the JVM options to be used when starting the RBF Routers. -@rem These options will be appended to the options specified as HADOOP_OPTS -@rem and therefore may override any similar flags set in HADOOP_OPTS -@rem -@rem set HADOOP_DFSROUTER_OPTS="" -@rem - -@rem The directory where pid files are stored. /tmp by default. -@rem NOTE: this should be set to a directory that can only be written to by -@rem the user that will run the hadoop daemons. Otherwise there is the -@rem potential for a symlink attack. -set HADOOP_PID_DIR=%HADOOP_PID_DIR% -set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR% - -@rem A string representing this instance of hadoop. %USERNAME% by default. -set HADOOP_IDENT_STRING=%USERNAME% diff --git a/hadoop-hdds/common/src/main/conf/hadoop-env.sh b/hadoop-hdds/common/src/main/conf/hadoop-env.sh deleted file mode 100644 index e43cd95b047ee..0000000000000 --- a/hadoop-hdds/common/src/main/conf/hadoop-env.sh +++ /dev/null @@ -1,439 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Set Hadoop-specific environment variables here. - -## -## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS. -## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS. THEREFORE, -## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE -## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh. 
-## -## Precedence rules: -## -## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults -## -## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults -## - -# Many of the options here are built from the perspective that users -# may want to provide OVERWRITING values on the command line. -# For example: -# -# JAVA_HOME=/usr/java/testing hdfs dfs -ls -# -# Therefore, the vast majority (BUT NOT ALL!) of these defaults -# are configured for substitution and not append. If append -# is preferable, modify this file accordingly. - -### -# Generic settings for HADOOP -### - -# Technically, the only required environment variable is JAVA_HOME. -# All others are optional. However, the defaults are probably not -# preferred. Many sites configure these options outside of Hadoop, -# such as in /etc/profile.d - -# The java implementation to use. By default, this environment -# variable is REQUIRED on ALL platforms except OS X! -# export JAVA_HOME= - -# Location of Hadoop. By default, Hadoop will attempt to determine -# this location based upon its execution path. -# export HADOOP_HOME= - -# Location of Hadoop's configuration information. i.e., where this -# file is living. If this is not defined, Hadoop will attempt to -# locate it based upon its execution path. -# -# NOTE: It is recommend that this variable not be set here but in -# /etc/profile.d or equivalent. Some options (such as -# --config) may react strangely otherwise. -# -# export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop - -# The maximum amount of heap to use (Java -Xmx). If no unit -# is provided, it will be converted to MB. Daemons will -# prefer any Xmx setting in their respective _OPT variable. -# There is no default; the JVM will autoscale based upon machine -# memory size. -# export HADOOP_HEAPSIZE_MAX= - -# The minimum amount of heap to use (Java -Xms). If no unit -# is provided, it will be converted to MB. Daemons will -# prefer any Xms setting in their respective _OPT variable. -# There is no default; the JVM will autoscale based upon machine -# memory size. -# export HADOOP_HEAPSIZE_MIN= - -# Enable extra debugging of Hadoop's JAAS binding, used to set up -# Kerberos security. -# export HADOOP_JAAS_DEBUG=true - -# Extra Java runtime options for all Hadoop commands. We don't support -# IPv6 yet/still, so by default the preference is set to IPv4. -# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true" -# For Kerberos debugging, an extended option set logs more information -# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug" - -# Some parts of the shell code may do special things dependent upon -# the operating system. We have to set this here. See the next -# section as to why.... -export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} - -# Extra Java runtime options for some Hadoop commands -# and clients (i.e., hdfs dfs -blah). These get appended to HADOOP_OPTS for -# such commands. In most cases, # this should be left empty and -# let users supply it on the command line. -# export HADOOP_CLIENT_OPTS="" - -# -# A note about classpaths. -# -# By default, Apache Hadoop overrides Java's CLASSPATH -# environment variable. It is configured such -# that it starts out blank with new entries added after passing -# a series of checks (file/dir exists, not already listed aka -# de-deduplication). During de-deduplication, wildcards and/or -# directories are *NOT* expanded to keep it simple. 
Therefore, -# if the computed classpath has two specific mentions of -# awesome-methods-1.0.jar, only the first one added will be seen. -# If two directories are in the classpath that both contain -# awesome-methods-1.0.jar, then Java will pick up both versions. - -# An additional, custom CLASSPATH. Site-wide configs should be -# handled via the shellprofile functionality, utilizing the -# hadoop_add_classpath function for greater control and much -# harder for apps/end-users to accidentally override. -# Similarly, end users should utilize ${HOME}/.hadooprc . -# This variable should ideally only be used as a short-cut, -# interactive way for temporary additions on the command line. -# export HADOOP_CLASSPATH="/some/cool/path/on/your/machine" - -# Should HADOOP_CLASSPATH be first in the official CLASSPATH? -# export HADOOP_USER_CLASSPATH_FIRST="yes" - -# If HADOOP_USE_CLIENT_CLASSLOADER is set, the classpath along -# with the main jar are handled by a separate isolated -# client classloader when 'hadoop jar', 'yarn jar', or 'mapred job' -# is utilized. If it is set, HADOOP_CLASSPATH and -# HADOOP_USER_CLASSPATH_FIRST are ignored. -# export HADOOP_USE_CLIENT_CLASSLOADER=true - -# HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of -# system classes for the client classloader when HADOOP_USE_CLIENT_CLASSLOADER -# is enabled. Names ending in '.' (period) are treated as package names, and -# names starting with a '-' are treated as negative matches. For example, -# export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop." - -# Enable optional, bundled Hadoop features -# This is a comma delimited list. It may NOT be overridden via .hadooprc -# Entries may be added/removed as needed. -# export HADOOP_OPTIONAL_TOOLS="@@@HADOOP_OPTIONAL_TOOLS@@@" - -### -# Options for remote shell connectivity -### - -# There are some optional components of hadoop that allow for -# command and control of remote hosts. For example, -# start-dfs.sh will attempt to bring up all NNs, DNS, etc. - -# Options to pass to SSH when one of the "log into a host and -# start/stop daemons" scripts is executed -# export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s" - -# The built-in ssh handler will limit itself to 10 simultaneous connections. -# For pdsh users, this sets the fanout size ( -f ) -# Change this to increase/decrease as necessary. -# export HADOOP_SSH_PARALLEL=10 - -# Filename which contains all of the hosts for any remote execution -# helper scripts # such as workers.sh, start-dfs.sh, etc. -# export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers" - -### -# Options for all daemons -### -# - -# -# Many options may also be specified as Java properties. It is -# very common, and in many cases, desirable, to hard-set these -# in daemon _OPTS variables. Where applicable, the appropriate -# Java property is also identified. Note that many are re-used -# or set differently in certain contexts (e.g., secure vs -# non-secure) -# - -# Where (primarily) daemon log files are stored. -# ${HADOOP_HOME}/logs by default. -# Java property: hadoop.log.dir -# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs - -# A string representing this instance of hadoop. $USER by default. -# This is used in writing log and pid files, so keep that in mind! -# Java property: hadoop.id.str -# export HADOOP_IDENT_STRING=$USER - -# How many seconds to pause after stopping a daemon -# export HADOOP_STOP_TIMEOUT=5 - -# Where pid files are stored. 
/tmp by default. -# export HADOOP_PID_DIR=/tmp - -# Default log4j setting for interactive commands -# Java property: hadoop.root.logger -# export HADOOP_ROOT_LOGGER=INFO,console - -# Default log4j setting for daemons spawned explicitly by -# --daemon option of hadoop, hdfs, mapred and yarn command. -# Java property: hadoop.root.logger -# export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA - -# Default log level and output location for security-related messages. -# You will almost certainly want to change this on a per-daemon basis via -# the Java property (i.e., -Dhadoop.security.logger=foo). (Note that the -# defaults for the NN and 2NN override this by default.) -# Java property: hadoop.security.logger -# export HADOOP_SECURITY_LOGGER=INFO,NullAppender - -# Default process priority level -# Note that sub-processes will also run at this level! -# export HADOOP_NICENESS=0 - -# Default name for the service level authorization file -# Java property: hadoop.policy.file -# export HADOOP_POLICYFILE="hadoop-policy.xml" - -# -# NOTE: this is not used by default! <----- -# You can define variables right here and then re-use them later on. -# For example, it is common to use the same garbage collection settings -# for all the daemons. So one could define: -# -# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps" -# -# .. and then use it as per the b option under the namenode. - -### -# Secure/privileged execution -### - -# -# Out of the box, Hadoop uses jsvc from Apache Commons to launch daemons -# on privileged ports. This functionality can be replaced by providing -# custom functions. See hadoop-functions.sh for more information. -# - -# The jsvc implementation to use. Jsvc is required to run secure datanodes -# that bind to privileged ports to provide authentication of data transfer -# protocol. Jsvc is not required if SASL is configured for authentication of -# data transfer protocol using non-privileged ports. -# export JSVC_HOME=/usr/bin - -# -# This directory contains pids for secure and privileged processes. -#export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR} - -# -# This directory contains the logs for secure and privileged processes. -# Java property: hadoop.log.dir -# export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR} - -# -# When running a secure daemon, the default value of HADOOP_IDENT_STRING -# ends up being a bit bogus. Therefore, by default, the code will -# replace HADOOP_IDENT_STRING with HADOOP_xx_SECURE_USER. If one wants -# to keep HADOOP_IDENT_STRING untouched, then uncomment this line. -# export HADOOP_SECURE_IDENT_PRESERVE="true" - -### -# NameNode specific parameters -### - -# Default log level and output location for file system related change -# messages. For non-namenode daemons, the Java property must be set in -# the appropriate _OPTS if one wants something other than INFO,NullAppender -# Java property: hdfs.audit.logger -# export HDFS_AUDIT_LOGGER=INFO,NullAppender - -# Specify the JVM options to be used when starting the NameNode. 
-# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# a) Set JMX options -# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026" -# -# b) Set garbage collection logs -# export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')" -# -# c) ... or set them directly -# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')" - -# this is the default: -# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS" - -### -# SecondaryNameNode specific parameters -### -# Specify the JVM options to be used when starting the SecondaryNameNode. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# This is the default: -# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS" - -### -# DataNode specific parameters -### -# Specify the JVM options to be used when starting the DataNode. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# This is the default: -# export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS" - -# On secure datanodes, user to run the datanode as after dropping privileges. -# This **MUST** be uncommented to enable secure HDFS if using privileged ports -# to provide authentication of data transfer protocol. This **MUST NOT** be -# defined if SASL is configured for authentication of data transfer protocol -# using non-privileged ports. -# This will replace the hadoop.id.str Java property in secure mode. -# export HDFS_DATANODE_SECURE_USER=hdfs - -# Supplemental options for secure datanodes -# By default, Hadoop uses jsvc which needs to know to launch a -# server jvm. -# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server" - -### -# NFS3 Gateway specific parameters -### -# Specify the JVM options to be used when starting the NFS3 Gateway. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_NFS3_OPTS="" - -# Specify the JVM options to be used when starting the Hadoop portmapper. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_PORTMAP_OPTS="-Xmx512m" - -# Supplemental options for priviliged gateways -# By default, Hadoop uses jsvc which needs to know to launch a -# server jvm. -# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server" - -# On privileged gateways, user to run the gateway as after dropping privileges -# This will replace the hadoop.id.str Java property in secure mode. -# export HDFS_NFS3_SECURE_USER=nfsserver - -### -# ZKFailoverController specific parameters -### -# Specify the JVM options to be used when starting the ZKFailoverController. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_ZKFC_OPTS="" - -### -# QuorumJournalNode specific parameters -### -# Specify the JVM options to be used when starting the QuorumJournalNode. 
-# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_JOURNALNODE_OPTS="" - -### -# HDFS Balancer specific parameters -### -# Specify the JVM options to be used when starting the HDFS Balancer. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_BALANCER_OPTS="" - -### -# HDFS Mover specific parameters -### -# Specify the JVM options to be used when starting the HDFS Mover. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_MOVER_OPTS="" - -### -# Router-based HDFS Federation specific parameters -# Specify the JVM options to be used when starting the RBF Routers. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_DFSROUTER_OPTS="" - -### -# Ozone Manager specific parameters -### -# Specify the JVM options to be used when starting the Ozone Manager. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_OM_OPTS="" - -### -# HDFS StorageContainerManager specific parameters -### -# Specify the JVM options to be used when starting the HDFS Storage Container Manager. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_STORAGECONTAINERMANAGER_OPTS="" - -### -# Advanced Users Only! -### - -# -# When building Hadoop, one can add the class paths to the commands -# via this special env var: -# export HADOOP_ENABLE_BUILD_PATHS="true" - -# -# To prevent accidents, shell commands be (superficially) locked -# to only allow certain users to execute certain subcommands. -# It uses the format of (command)_(subcommand)_USER. -# -# For example, to limit who can execute the namenode command, -# export HDFS_NAMENODE_USER=hdfs - - -### -# Registry DNS specific parameters -### -# For privileged registry DNS, user to run as after dropping privileges -# This will replace the hadoop.id.str Java property in secure mode. -# export HADOOP_REGISTRYDNS_SECURE_USER=yarn - -# Supplemental options for privileged registry DNS -# By default, Hadoop uses jsvc which needs to know to launch a -# server jvm. -# export HADOOP_REGISTRYDNS_SECURE_EXTRA_OPTS="-jvm server" diff --git a/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties b/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties deleted file mode 100644 index f67bf8e4c5b1f..0000000000000 --- a/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties +++ /dev/null @@ -1,99 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# syntax: [prefix].[source|sink].[instance].[options] -# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details - -*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink -# default sampling period, in seconds -*.period=10 - -# The namenode-metrics.out will contain metrics from all context -#namenode.sink.file.filename=namenode-metrics.out -# Specifying a special sampling period for namenode: -#namenode.sink.*.period=8 - -#datanode.sink.file.filename=datanode-metrics.out - -#resourcemanager.sink.file.filename=resourcemanager-metrics.out - -#nodemanager.sink.file.filename=nodemanager-metrics.out - -#mrappmaster.sink.file.filename=mrappmaster-metrics.out - -#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out - -# the following example split metrics of different -# context to different sinks (in this case files) -#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink -#nodemanager.sink.file_jvm.context=jvm -#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out -#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink -#nodemanager.sink.file_mapred.context=mapred -#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out - -# -# Below are for sending metrics to Ganglia -# -# for Ganglia 3.0 support -# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30 -# -# for Ganglia 3.1 support -# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31 - -# *.sink.ganglia.period=10 - -# default for supportsparse is false -# *.sink.ganglia.supportsparse=true - -#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both -#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40 - -# Tag values to use for the ganglia prefix. If not defined no tags are used. -# If '*' all tags are used. If specifying multiple tags separate them with -# commas. Note that the last segment of the property name is the context name. -# -# A typical use of tags is separating the metrics by the HDFS rpc port -# and HDFS service rpc port. 
-# For example: -# With following HDFS configuration: -# dfs.namenode.rpc-address is set as namenodeAddress:9110 -# dfs.namenode.servicerpc-address is set as namenodeAddress:9111 -# If no tags are used, following metric would be gathered: -# rpc.rpc.NumOpenConnections -# If using "*.sink.ganglia.tagsForPrefix.rpc=port", -# following metrics would be gathered: -# rpc.rpc.port=9110.NumOpenConnections -# rpc.rpc.port=9111.NumOpenConnections -# -#*.sink.ganglia.tagsForPrefix.jvm=ProcessName -#*.sink.ganglia.tagsForPrefix.dfs=HAState,IsOutOfSync -#*.sink.ganglia.tagsForPrefix.rpc=port -#*.sink.ganglia.tagsForPrefix.rpcdetailed=port -#*.sink.ganglia.tagsForPrefix.metricssystem=* -#*.sink.ganglia.tagsForPrefix.ugi=* -#*.sink.ganglia.tagsForPrefix.mapred= - -#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 - -#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 - -#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 - -#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 - -#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 - -#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 diff --git a/hadoop-hdds/common/src/main/conf/hadoop-policy.xml b/hadoop-hdds/common/src/main/conf/hadoop-policy.xml deleted file mode 100644 index 85e4975a78628..0000000000000 --- a/hadoop-hdds/common/src/main/conf/hadoop-policy.xml +++ /dev/null @@ -1,275 +0,0 @@ - - - - - - - - - security.client.protocol.acl - * - ACL for ClientProtocol, which is used by user code - via the DistributedFileSystem. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.client.datanode.protocol.acl - * - ACL for ClientDatanodeProtocol, the client-to-datanode protocol - for block recovery. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.datanode.protocol.acl - * - ACL for DatanodeProtocol, which is used by datanodes to - communicate with the namenode. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.inter.datanode.protocol.acl - * - ACL for InterDatanodeProtocol, the inter-datanode protocol - for updating generation timestamp. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.namenode.protocol.acl - * - ACL for NamenodeProtocol, the protocol used by the secondary - namenode to communicate with the namenode. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.admin.operations.protocol.acl - * - ACL for AdminOperationsProtocol. Used for admin commands. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. 
- - - - security.refresh.user.mappings.protocol.acl - * - ACL for RefreshUserMappingsProtocol. Used to refresh - users mappings. The ACL is a comma-separated list of user and - group names. The user and group list is separated by a blank. For - e.g. "alice,bob users,wheel". A special value of "*" means all - users are allowed. - - - - security.refresh.policy.protocol.acl - * - ACL for RefreshAuthorizationPolicyProtocol, used by the - dfsadmin and mradmin commands to refresh the security policy in-effect. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.ha.service.protocol.acl - * - ACL for HAService protocol used by HAAdmin to manage the - active and stand-by states of namenode. - - - - security.router.admin.protocol.acl - * - ACL for RouterAdmin Protocol. The ACL is a comma-separated - list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - - security.zkfc.protocol.acl - * - ACL for access to the ZK Failover Controller - - - - - security.qjournal.service.protocol.acl - * - ACL for QJournalProtocol, used by the NN to communicate with - JNs when using the QuorumJournalManager for edit logs. - - - - security.interqjournal.service.protocol.acl - * - ACL for InterQJournalProtocol, used by the JN to - communicate with other JN - - - - - security.mrhs.client.protocol.acl - * - ACL for HSClientProtocol, used by job clients to - communciate with the MR History Server job status etc. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - - - security.resourcetracker.protocol.acl - * - ACL for ResourceTrackerProtocol, used by the - ResourceManager and NodeManager to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.resourcemanager-administration.protocol.acl - * - ACL for ResourceManagerAdministrationProtocol, for admin commands. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.applicationclient.protocol.acl - * - ACL for ApplicationClientProtocol, used by the ResourceManager - and applications submission clients to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.applicationmaster.protocol.acl - * - ACL for ApplicationMasterProtocol, used by the ResourceManager - and ApplicationMasters to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.containermanagement.protocol.acl - * - ACL for ContainerManagementProtocol protocol, used by the NodeManager - and ApplicationMasters to communicate with each other. 
- The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.resourcelocalizer.protocol.acl - * - ACL for ResourceLocalizer protocol, used by the NodeManager - and ResourceLocalizer to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.job.task.protocol.acl - * - ACL for TaskUmbilicalProtocol, used by the map and reduce - tasks to communicate with the parent tasktracker. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.job.client.protocol.acl - * - ACL for MRClientProtocol, used by job clients to - communciate with the MR ApplicationMaster to query job status etc. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.applicationhistory.protocol.acl - * - ACL for ApplicationHistoryProtocol, used by the timeline - server and the generic history service client to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.collector-nodemanager.protocol.acl - * - ACL for CollectorNodemanagerProtocol, used by nodemanager - if timeline service v2 is enabled, for the timeline collector and nodemanager - to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.applicationmaster-nodemanager.applicationmaster.protocol.acl - * - ACL for ApplicationMasterProtocol, used by the Nodemanager - and ApplicationMasters to communicate. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.distributedscheduling.protocol.acl - * - ACL for DistributedSchedulingAMProtocol, used by the Nodemanager - and Resourcemanager to communicate. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java deleted file mode 100644 index 99972ae900389..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ /dev/null @@ -1,252 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
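As an illustrative aside (not part of the patch itself), the ACL strings used throughout the hadoop-policy.xml entries above follow the "users groups" convention, with the two comma-separated lists divided by a single blank and "*" meaning everyone. A minimal sketch of how such a value behaves, assuming the stock org.apache.hadoop.security.authorize.AccessControlList helper from hadoop-common (class and literal values here are only for demonstration):

    import org.apache.hadoop.security.authorize.AccessControlList;

    public class AclFormatDemo {
      public static void main(String[] args) {
        // "alice,bob" are user names, "users,wheel" are group names;
        // the single blank separates the two lists.
        AccessControlList acl = new AccessControlList("alice,bob users,wheel");
        System.out.println("users:  " + acl.getUsers());
        System.out.println("groups: " + acl.getGroups());

        // The special value "*" grants access to every caller.
        System.out.println(new AccessControlList("*").isAllAllowed());
      }
    }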
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds; - -import org.apache.hadoop.hdds.utils.db.DBProfile; - -/** - * This class contains constants for configuration keys and default values - * used in hdds. - */ -public final class HddsConfigKeys { - - public static final String HDDS_HEARTBEAT_INTERVAL = - "hdds.heartbeat.interval"; - public static final String HDDS_HEARTBEAT_INTERVAL_DEFAULT = - "30s"; - public static final String HDDS_NODE_REPORT_INTERVAL = - "hdds.node.report.interval"; - public static final String HDDS_NODE_REPORT_INTERVAL_DEFAULT = - "60s"; - public static final String HDDS_CONTAINER_REPORT_INTERVAL = - "hdds.container.report.interval"; - public static final String HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT = - "60s"; - public static final String HDDS_PIPELINE_REPORT_INTERVAL = - "hdds.pipeline.report.interval"; - public static final String HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT = - "60s"; - public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL = - "hdds.command.status.report.interval"; - public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT = - "60s"; - public static final String HDDS_CONTAINER_ACTION_MAX_LIMIT = - "hdds.container.action.max.limit"; - public static final int HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT = - 20; - public static final String HDDS_PIPELINE_ACTION_MAX_LIMIT = - "hdds.pipeline.action.max.limit"; - public static final int HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT = - 20; - // Configuration to allow volume choosing policy. - public static final String HDDS_DATANODE_VOLUME_CHOOSING_POLICY = - "hdds.datanode.volume.choosing.policy"; - // DB PKIProfile used by ROCKDB instances. - public static final String HDDS_DB_PROFILE = "hdds.db.profile"; - public static final DBProfile HDDS_DEFAULT_DB_PROFILE = DBProfile.DISK; - // Once a container usage crosses this threshold, it is eligible for - // closing. - public static final String HDDS_CONTAINER_CLOSE_THRESHOLD = - "hdds.container.close.threshold"; - public static final float HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f; - public static final String HDDS_SCM_SAFEMODE_ENABLED = - "hdds.scm.safemode.enabled"; - - public static final boolean HDDS_SCM_SAFEMODE_ENABLED_DEFAULT = true; - public static final String HDDS_SCM_SAFEMODE_MIN_DATANODE = - "hdds.scm.safemode.min.datanode"; - public static final int HDDS_SCM_SAFEMODE_MIN_DATANODE_DEFAULT = 1; - - public static final String - HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT = - "hdds.scm.wait.time.after.safemode.exit"; - - public static final String - HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT = "5m"; - - public static final String HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK = - "hdds.scm.safemode.pipeline-availability.check"; - public static final boolean - HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT = false; - - // % of containers which should have at least one reported replica - // before SCM comes out of safe mode. - public static final String HDDS_SCM_SAFEMODE_THRESHOLD_PCT = - "hdds.scm.safemode.threshold.pct"; - public static final double HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT = 0.99; - - - // percentage of healthy pipelines, where all 3 datanodes are reported in the - // pipeline. 
- public static final String HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT = - "hdds.scm.safemode.healthy.pipelie.pct"; - public static final double - HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT_DEFAULT = 0.10; - - public static final String HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT = - "hdds.scm.safemode.atleast.one.node.reported.pipeline.pct"; - public static final double - HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT_DEFAULT = 0.90; - - public static final String HDDS_LOCK_MAX_CONCURRENCY = - "hdds.lock.max.concurrency"; - public static final int HDDS_LOCK_MAX_CONCURRENCY_DEFAULT = 100; - // This configuration setting is used as a fallback location by all - // Ozone/HDDS services for their metadata. It is useful as a single - // config point for test/PoC clusters. - // - // In any real cluster where performance matters, the SCM, OM and DN - // metadata locations must be configured explicitly. - public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; - - public static final String HDDS_PROMETHEUS_ENABLED = - "hdds.prometheus.endpoint.enabled"; - - public static final String HDDS_PROFILER_ENABLED = - "hdds.profiler.endpoint.enabled"; - - public static final String HDDS_KEY_LEN = "hdds.key.len"; - public static final int HDDS_DEFAULT_KEY_LEN = 2048; - public static final String HDDS_KEY_ALGORITHM = "hdds.key.algo"; - public static final String HDDS_DEFAULT_KEY_ALGORITHM = "RSA"; - public static final String HDDS_SECURITY_PROVIDER = "hdds.security.provider"; - public static final String HDDS_DEFAULT_SECURITY_PROVIDER = "BC"; - public static final String HDDS_KEY_DIR_NAME = "hdds.key.dir.name"; - public static final String HDDS_KEY_DIR_NAME_DEFAULT = "keys"; - // TODO : Talk to StorageIO classes and see if they can return a secure - // storage location for each node. - public static final String HDDS_METADATA_DIR_NAME = "hdds.metadata.dir"; - public static final String HDDS_PRIVATE_KEY_FILE_NAME = - "hdds.priv.key.file.name"; - public static final String HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT = "private.pem"; - public static final String HDDS_PUBLIC_KEY_FILE_NAME = "hdds.public.key.file" - + ".name"; - public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem"; - - public static final String HDDS_BLOCK_TOKEN_EXPIRY_TIME = - "hdds.block.token.expiry.time"; - public static final String HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT = "1d"; - /** - * Maximum duration of certificates issued by SCM including Self-Signed Roots. - * The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS - * Default value is 5 years and written as P1865D. - */ - public static final String HDDS_X509_MAX_DURATION = "hdds.x509.max.duration"; - // Limit Certificate duration to a max value of 5 years. - public static final String HDDS_X509_MAX_DURATION_DEFAULT= "P1865D"; - public static final String HDDS_X509_SIGNATURE_ALGO = - "hdds.x509.signature.algorithm"; - public static final String HDDS_X509_SIGNATURE_ALGO_DEFAULT = "SHA256withRSA"; - public static final String HDDS_BLOCK_TOKEN_ENABLED = - "hdds.block.token.enabled"; - public static final boolean HDDS_BLOCK_TOKEN_ENABLED_DEFAULT = false; - - public static final String HDDS_X509_DIR_NAME = "hdds.x509.dir.name"; - public static final String HDDS_X509_DIR_NAME_DEFAULT = "certs"; - public static final String HDDS_X509_FILE_NAME = "hdds.x509.file.name"; - public static final String HDDS_X509_FILE_NAME_DEFAULT = "certificate.crt"; - - /** - * Default duration of certificates issued by SCM CA. 
- * The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS - * Default value is 5 years and written as P1865D. - */ - public static final String HDDS_X509_DEFAULT_DURATION = "hdds.x509.default" + - ".duration"; - // Default Certificate duration to one year. - public static final String HDDS_X509_DEFAULT_DURATION_DEFAULT = "P365D"; - - /** - * Do not instantiate. - */ - private HddsConfigKeys() { - } - - // Enable TLS for GRPC clients/server in ozone. - public static final String HDDS_GRPC_TLS_ENABLED = "hdds.grpc.tls.enabled"; - public static final boolean HDDS_GRPC_TLS_ENABLED_DEFAULT = false; - - // Choose TLS provider the default is set to OPENSSL for better performance. - public static final String HDDS_GRPC_TLS_PROVIDER = "hdds.grpc.tls.provider"; - public static final String HDDS_GRPC_TLS_PROVIDER_DEFAULT = "OPENSSL"; - - // Test only settings for using test signed certificate, authority assume to - // be localhost. - public static final String HDDS_GRPC_TLS_TEST_CERT = "hdds.grpc.tls" + - ".test.cert"; - public static final boolean HDDS_GRPC_TLS_TEST_CERT_DEFAULT = false; - - // Comma separated acls (users, groups) allowing clients accessing - // datanode container protocol - // when hadoop.security.authorization is true, this needs to be set in - // hadoop-policy.xml, "*" allows all users/groups to access. - public static final String - HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL = - "hdds.security.client.datanode.container.protocol.acl"; - - // Comma separated acls (users, groups) allowing clients accessing - // scm container protocol - // when hadoop.security.authorization is true, this needs to be set in - // hadoop-policy.xml, "*" allows all users/groups to access. - public static final String HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL = - "hdds.security.client.scm.container.protocol.acl"; - - // Comma separated acls (users, groups) allowing clients accessing - // scm block protocol - // when hadoop.security.authorization is true, this needs to be set in - // hadoop-policy.xml, "*" allows all users/groups to access. - public static final String HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL = - "hdds.security.client.scm.block.protocol.acl"; - - // Comma separated acls (users, groups) allowing clients accessing - // scm certificate protocol - // when hadoop.security.authorization is true, this needs to be set in - // hadoop-policy.xml, "*" allows all users/groups to access. 
- public static final String HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL = - "hdds.security.client.scm.certificate.protocol.acl"; - - // Determines if the Container Chunk Manager will write user data to disk - // Set to false only for specific performance tests - public static final String HDDS_CONTAINER_PERSISTDATA = - "hdds.container.chunk.persistdata"; - public static final boolean HDDS_CONTAINER_PERSISTDATA_DEFAULT = true; - - public static final String HDDS_CONTAINER_SCRUB_ENABLED = - "hdds.container.scrub.enabled"; - public static final boolean HDDS_CONTAINER_SCRUB_ENABLED_DEFAULT = false; - - public static final String HDDS_DATANODE_HTTP_ENABLED_KEY = - "hdds.datanode.http.enabled"; - public static final String HDDS_DATANODE_HTTP_BIND_HOST_KEY = - "hdds.datanode.http-bind-host"; - public static final String HDDS_DATANODE_HTTPS_BIND_HOST_KEY = - "hdds.datanode.https-bind-host"; - public static final String HDDS_DATANODE_HTTP_ADDRESS_KEY = - "hdds.datanode.http-address"; - public static final String HDDS_DATANODE_HTTPS_ADDRESS_KEY = - "hdds.datanode.https-address"; - - public static final String HDDS_DATANODE_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int HDDS_DATANODE_HTTP_BIND_PORT_DEFAULT = 9882; - public static final int HDDS_DATANODE_HTTPS_BIND_PORT_DEFAULT = 9883; - public static final String - HDDS_DATANODE_HTTP_KERBEROS_PRINCIPAL_KEY = - "hdds.datanode.http.kerberos.principal"; - public static final String - HDDS_DATANODE_HTTP_KERBEROS_KEYTAB_FILE_KEY = - "hdds.datanode.http.kerberos.keytab"; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java deleted file mode 100644 index b244b8cf75d74..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
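As an illustrative aside (not part of the patch itself), the constants in HddsConfigKeys above are plain key/default pairs and are read through an OzoneConfiguration like any other Hadoop setting. A minimal sketch using only keys and defaults that appear in this hunk (the demo class name is made up):

    import org.apache.hadoop.hdds.HddsConfigKeys;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public class HddsConfigDemo {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();

        // Falls back to the "30s" default when hdds.heartbeat.interval is unset.
        String heartbeat = conf.get(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL,
            HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL_DEFAULT);

        // Typed getters pair each key with its *_DEFAULT constant.
        float closeThreshold = conf.getFloat(
            HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD,
            HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT);
        boolean blockTokens = conf.getBoolean(
            HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED,
            HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT);

        System.out.println(heartbeat + " " + closeThreshold + " " + blockTokens);
      }
    }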

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds; - -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - -/** - * HDDS Id generator. - */ -public final class HddsIdFactory { - private HddsIdFactory() { - } - - private static final AtomicLong LONG_COUNTER = new AtomicLong( - System.currentTimeMillis()); - - /** - * Returns an incrementing long. This class doesn't - * persist initial value for long Id's, so incremental id's after restart - * may collide with previously generated Id's. - * - * @return long - */ - public static long getLongId() { - return LONG_COUNTER.incrementAndGet(); - } - - /** - * Returns a uuid. - * - * @return UUID. - */ - public static UUID getUUId() { - return UUID.randomUUID(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java deleted file mode 100644 index d7b20fdd9172c..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds; - -import javax.management.ObjectName; -import java.io.IOException; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.Calendar; -import java.util.Collection; -import java.util.HashSet; -import java.util.Map; -import java.util.Optional; -import java.util.TimeZone; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.source.JvmMetrics; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.net.DNS; -import org.apache.hadoop.net.NetUtils; - -import com.google.common.net.HostAndPort; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT; - -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * HDDS specific stateless utility functions. - */ -@InterfaceAudience.Private -@InterfaceStability.Stable -public final class HddsUtils { - - - private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class); - - /** - * The service ID of the solitary Ozone SCM service. - */ - public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService"; - public static final String OZONE_SCM_SERVICE_INSTANCE_ID = - "OzoneScmServiceInstance"; - private static final TimeZone UTC_ZONE = TimeZone.getTimeZone("UTC"); - - - private static final int NO_PORT = -1; - - private HddsUtils() { - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM. - * - * @param conf - * @return Target InetSocketAddress for the SCM client endpoint. 
- */ - public static InetSocketAddress getScmAddressForClients(Configuration conf) { - Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - - if (!host.isPresent()) { - // Fallback to Ozone SCM names. - Collection scmAddresses = getSCMAddresses(conf); - if (scmAddresses.size() > 1) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_NAMES + - " must contain a single hostname. Multiple SCM hosts are " + - "currently unsupported"); - } - host = Optional.of(scmAddresses.iterator().next().getHostName()); - } - - if (!host.isPresent()) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + " must be defined. See" - + " https://wiki.apache.org/hadoop/Ozone#Configuration for " - + "details" - + " on configuring Ozone."); - } - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - - return NetUtils.createSocketAddr(host.get() + ":" + port - .orElse(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM for block service. If - * {@link ScmConfigKeys#OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY} is not defined - * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If neither - * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used. - * - * @param conf - * @return Target InetSocketAddress for the SCM block client endpoint. - * @throws IllegalArgumentException if configuration is not defined. - */ - public static InetSocketAddress getScmAddressForBlockClients( - Configuration conf) { - Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY); - - if (!host.isPresent()) { - host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - } - - if (!host.isPresent()) { - // Fallback to Ozone SCM names. - Collection scmAddresses = getSCMAddresses(conf); - if (scmAddresses.size() > 1) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_NAMES + - " must contain a single hostname. Multiple SCM hosts are " + - "currently unsupported"); - } - host = Optional.of(scmAddresses.iterator().next().getHostName()); - } - - if (!host.isPresent()) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY - + " must be defined. See" - + " https://wiki.apache.org/hadoop/Ozone#Configuration" - + " for details on configuring Ozone."); - } - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY); - - return NetUtils.createSocketAddr(host.get() + ":" + port - .orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT)); - } - - /** - * Create a scm security client. - * @param conf - Ozone configuration. 
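As an illustrative aside (not part of the patch itself), the client-address lookups above resolve the SCM endpoint from whichever of the client address key or ozone.scm.names is present, and append the default client port when none is given. A small sketch of that behaviour using the ScmConfigKeys constants referenced in this hunk; the hostname and port literals are invented for the example:

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.HddsUtils;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    public class ScmAddressDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Host and port are both taken from the client address key.
        conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "scm1.example.com:9876");
        InetSocketAddress explicit = HddsUtils.getScmAddressForClients(conf);

        // Host-only value: OZONE_SCM_CLIENT_PORT_DEFAULT is appended instead.
        conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "scm1.example.com");
        InetSocketAddress defaulted = HddsUtils.getScmAddressForClients(conf);

        System.out.println(explicit + " and " + defaulted);
      }
    }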
- * - * @return {@link SCMSecurityProtocol} - * @throws IOException - */ - public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient( - OzoneConfiguration conf) throws IOException { - RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, - ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class); - InetSocketAddress address = - getScmAddressForSecurityProtocol(conf); - RetryPolicy retryPolicy = - RetryPolicies.retryForeverWithFixedSleep( - 1000, TimeUnit.MILLISECONDS); - SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient = - new SCMSecurityProtocolClientSideTranslatorPB( - RPC.getProtocolProxy(SCMSecurityProtocolPB.class, scmVersion, - address, UserGroupInformation.getCurrentUser(), - conf, NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf), retryPolicy).getProxy()); - return scmSecurityClient; - } - - /** - * Retrieve the hostname, trying the supplied config keys in order. - * Each config value may be absent, or if present in the format - * host:port (the :port part is optional). - * - * @param conf - Conf - * @param keys a list of configuration key names. - * - * @return first hostname component found from the given keys, or absent. - * @throws IllegalArgumentException if any values are not in the 'host' - * or host:port format. - */ - public static Optional getHostNameFromConfigKeys(Configuration conf, - String... keys) { - for (final String key : keys) { - final String value = conf.getTrimmed(key); - final Optional hostName = getHostName(value); - if (hostName.isPresent()) { - return hostName; - } - } - return Optional.empty(); - } - - /** - * Gets the hostname or Indicates that it is absent. - * @param value host or host:port - * @return hostname - */ - public static Optional getHostName(String value) { - if ((value == null) || value.isEmpty()) { - return Optional.empty(); - } - String hostname = value.replaceAll("\\:[0-9]+$", ""); - if (hostname.length() == 0) { - return Optional.empty(); - } else { - return Optional.of(hostname); - } - } - - /** - * Gets the port if there is one, throws otherwise. - * @param value String in host:port format. - * @return Port - */ - public static Optional getHostPort(String value) { - if ((value == null) || value.isEmpty()) { - return Optional.empty(); - } - int port = HostAndPort.fromString(value).getPortOrDefault(NO_PORT); - if (port == NO_PORT) { - return Optional.empty(); - } else { - return Optional.of(port); - } - } - - /** - * Retrieve the port number, trying the supplied config keys in order. - * Each config value may be absent, or if present in the format - * host:port (the :port part is optional). - * - * @param conf Conf - * @param keys a list of configuration key names. - * - * @return first port number component found from the given keys, or absent. - * @throws IllegalArgumentException if any values are not in the 'host' - * or host:port format. - */ - public static Optional getPortNumberFromConfigKeys( - Configuration conf, String... keys) { - for (final String key : keys) { - final String value = conf.getTrimmed(key); - final Optional hostPort = getHostPort(value); - if (hostPort.isPresent()) { - return hostPort; - } - } - return Optional.empty(); - } - - /** - * Retrieve the socket addresses of all storage container managers. 
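As an illustrative aside (not part of the patch itself), the getHostName(String) and getHostPort(String) helpers earlier in this hunk split an optional "host:port" value into its two parts and return empty Optionals when a part is missing. A brief illustration, assuming the Optional<String> and Optional<Integer> signatures that the flattened generics above correspond to, with made-up input values:

    import java.util.Optional;

    import org.apache.hadoop.hdds.HddsUtils;

    public class HostPortDemo {
      public static void main(String[] args) {
        // Both parts present: host and port are each returned.
        Optional<String> host = HddsUtils.getHostName("scm1.example.com:9876");
        Optional<Integer> port = HddsUtils.getHostPort("scm1.example.com:9876");
        System.out.println(host.get() + " / " + port.get());

        // No ":port" suffix: the port Optional is empty, the host is unchanged.
        System.out.println(HddsUtils.getHostPort("scm1.example.com").isPresent());
      }
    }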
- * - * @param conf - * @return A collection of SCM addresses - * @throws IllegalArgumentException If the configuration is invalid - */ - public static Collection getSCMAddresses( - Configuration conf) throws IllegalArgumentException { - Collection addresses = - new HashSet(); - Collection names = - conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES); - if (names == null || names.isEmpty()) { - throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES - + " need to be a set of valid DNS names or IP addresses." - + " Null or empty address list found."); - } - - final Optional defaultPort = Optional - .of(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT); - for (String address : names) { - Optional hostname = getHostName(address); - if (!hostname.isPresent()) { - throw new IllegalArgumentException("Invalid hostname for SCM: " - + hostname); - } - Optional port = getHostPort(address); - InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(), - port.orElse(defaultPort.get())); - addresses.add(addr); - } - return addresses; - } - - public static boolean isHddsEnabled(Configuration conf) { - return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT); - } - - - /** - * Returns the hostname for this datanode. If the hostname is not - * explicitly configured in the given config, then it is determined - * via the DNS class. - * - * @param conf Configuration - * - * @return the hostname (NB: may not be a FQDN) - * @throws UnknownHostException if the dfs.datanode.dns.interface - * option is used and the hostname can not be determined - */ - public static String getHostName(Configuration conf) - throws UnknownHostException { - String name = conf.get(DFS_DATANODE_HOST_NAME_KEY); - if (name == null) { - String dnsInterface = conf.get( - CommonConfigurationKeys.HADOOP_SECURITY_DNS_INTERFACE_KEY); - String nameServer = conf.get( - CommonConfigurationKeys.HADOOP_SECURITY_DNS_NAMESERVER_KEY); - boolean fallbackToHosts = false; - - if (dnsInterface == null) { - // Try the legacy configuration keys. - dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY); - nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY); - } else { - // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file - // resolution if DNS fails. We will not use hosts file resolution - // by default to avoid breaking existing clusters. - fallbackToHosts = true; - } - - name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts); - } - return name; - } - - /** - * Checks if the container command is read only or not. - * @param proto ContainerCommand Request proto - * @return True if its readOnly , false otherwise. - */ - public static boolean isReadOnly( - ContainerProtos.ContainerCommandRequestProto proto) { - switch (proto.getCmdType()) { - case ReadContainer: - case ReadChunk: - case ListBlock: - case GetBlock: - case GetSmallFile: - case ListContainer: - case ListChunk: - case GetCommittedBlockLength: - return true; - case CloseContainer: - case WriteChunk: - case UpdateContainer: - case CompactChunk: - case CreateContainer: - case DeleteChunk: - case DeleteContainer: - case DeleteBlock: - case PutBlock: - case PutSmallFile: - default: - return false; - } - } - - /** - * Register the provided MBean with additional JMX ObjectName properties. - * If additional properties are not supported then fallback to registering - * without properties. 
- * - * @param serviceName - see {@link MBeans#register} - * @param mBeanName - see {@link MBeans#register} - * @param jmxProperties - additional JMX ObjectName properties. - * @param mBean - the MBean to register. - * @return the named used to register the MBean. - */ - public static ObjectName registerWithJmxProperties( - String serviceName, String mBeanName, Map jmxProperties, - Object mBean) { - try { - - // Check support for registering with additional properties. - final Method registerMethod = MBeans.class.getMethod( - "register", String.class, String.class, - Map.class, Object.class); - - return (ObjectName) registerMethod.invoke( - null, serviceName, mBeanName, jmxProperties, mBean); - - } catch (NoSuchMethodException | IllegalAccessException | - InvocationTargetException e) { - - // Fallback - if (LOG.isTraceEnabled()) { - LOG.trace("Registering MBean {} without additional properties {}", - mBeanName, jmxProperties); - } - return MBeans.register(serviceName, mBeanName, mBean); - } - } - - /** - * Get the current UTC time in milliseconds. - * @return the current UTC time in milliseconds. - */ - public static long getUtcTime() { - return Calendar.getInstance(UTC_ZONE).getTimeInMillis(); - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM for - * {@link org.apache.hadoop.hdds.protocol.SCMSecurityProtocol}. If - * {@link ScmConfigKeys#OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY} is not defined - * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If neither - * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used. - * - * @param conf - * @return Target InetSocketAddress for the SCM block client endpoint. - * @throws IllegalArgumentException if configuration is not defined. - */ - public static InetSocketAddress getScmAddressForSecurityProtocol( - Configuration conf) { - Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY); - - if (!host.isPresent()) { - host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - } - - if (!host.isPresent()) { - // Fallback to Ozone SCM names. - Collection scmAddresses = getSCMAddresses(conf); - if (scmAddresses.size() > 1) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_NAMES + - " must contain a single hostname. Multiple SCM hosts are " + - "currently unsupported"); - } - host = Optional.of(scmAddresses.iterator().next().getHostName()); - } - - if (!host.isPresent()) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY - + " must be defined. See" - + " https://wiki.apache.org/hadoop/Ozone#Configuration" - + " for details on configuring Ozone."); - } - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY); - - return NetUtils.createSocketAddr(host.get() + ":" + port - .orElse(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT)); - } - - /** - * Initialize hadoop metrics system for Ozone servers. - * @param configuration OzoneConfiguration to use. - * @param serverName The logical name of the server components. 
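As an illustrative aside (not part of the patch itself), registerWithJmxProperties earlier in this hunk tries the MBeans.register overload that accepts extra ObjectName properties and quietly falls back to the plain overload when that method is unavailable. A hedged usage sketch with a made-up MBean, service name, and property value:

    import java.util.HashMap;
    import java.util.Map;

    import javax.management.ObjectName;

    import org.apache.hadoop.hdds.HddsUtils;

    public class JmxRegisterDemo {
      // Made-up MXBean purely for illustration.
      public interface DemoMXBean {
        int getValue();
      }

      public static class Demo implements DemoMXBean {
        @Override
        public int getValue() {
          return 42;
        }
      }

      public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("port", "9876"); // extra ObjectName property, e.g. an RPC port

        ObjectName name = HddsUtils.registerWithJmxProperties(
            "DemoService", "DemoInfo", props, new Demo());
        System.out.println(name);
      }
    }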
- * @return - */ - public static MetricsSystem initializeMetrics( - OzoneConfiguration configuration, String serverName) { - MetricsSystem metricsSystem = DefaultMetricsSystem.initialize(serverName); - JvmMetrics.create(serverName, - configuration.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY), - DefaultMetricsSystem.instance()); - return metricsSystem; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java deleted file mode 100644 index 372828b95cefc..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.cli; - -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.fs.Path; - -import com.google.common.annotations.VisibleForTesting; -import picocli.CommandLine; -import picocli.CommandLine.ExecutionException; -import picocli.CommandLine.Option; -import picocli.CommandLine.RunLast; - -/** - * This is a generic parent class for all the ozone related cli tools. - */ -public class GenericCli implements Callable, GenericParentCommand { - - @Option(names = {"--verbose"}, - description = "More verbose output. Show the stack trace of the errors.") - private boolean verbose; - - @Option(names = {"-D", "--set"}) - private Map configurationOverrides = new HashMap<>(); - - @Option(names = {"-conf"}) - private String configurationPath; - - private final CommandLine cmd; - - public GenericCli() { - cmd = new CommandLine(this); - } - - public void run(String[] argv) { - try { - execute(argv); - } catch (ExecutionException ex) { - printError(ex.getCause() == null ? ex : ex.getCause()); - System.exit(-1); - } - } - - @VisibleForTesting - public void execute(String[] argv) { - cmd.parseWithHandler(new RunLast(), argv); - } - - protected void printError(Throwable error) { - //message could be null in case of NPE. This is unexpected so we can - //print out the stack trace. - if (verbose || error.getMessage() == null - || error.getMessage().length() == 0) { - error.printStackTrace(System.err); - } else { - System.err.println(error.getMessage().split("\n")[0]); - } - } - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException(cmd); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - OzoneConfiguration ozoneConf = new OzoneConfiguration(); - if (configurationPath != null) { - ozoneConf.addResource(new Path(configurationPath)); - } - if (configurationOverrides != null) { - for (Entry entry : configurationOverrides.entrySet()) { - ozoneConf.set(entry.getKey(), entry.getValue()); - } - } - return ozoneConf; - } - - @VisibleForTesting - public picocli.CommandLine getCmd() { - return cmd; - } - - @Override - public boolean isVerbose() { - return verbose; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java deleted file mode 100644 index 6abad3e32b8d0..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
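As an illustrative aside (not part of the patch itself), GenericCli above is the shared picocli entry point: a subclass adds its own @Command annotation and subcommands, and inherits --verbose, the -D/--set overrides, and -conf handling for free. A minimal, hypothetical subclass sketch (the tool name and printed key are chosen only for the example):

    import org.apache.hadoop.hdds.cli.GenericCli;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    import picocli.CommandLine.Command;

    // Hypothetical tool; real CLIs would also register subcommands on @Command.
    @Command(name = "demo-tool",
        description = "Prints a single configuration value.")
    public class DemoTool extends GenericCli {

      public static void main(String[] args) {
        new DemoTool().run(args);
      }

      @Override
      public Void call() throws Exception {
        // createOzoneConfiguration() already applies -conf <file> and -D/--set overrides.
        OzoneConfiguration conf = createOzoneConfiguration();
        System.out.println(conf.get("hdds.heartbeat.interval", "30s"));
        return null;
      }
    }

Invoked as "demo-tool -D hdds.heartbeat.interval=15s", the override collected by GenericCli's -D map would be expected to win over the default.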

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.cli; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -/** - * Interface to access the higher level parameters. - */ -public interface GenericParentCommand { - - boolean isVerbose(); - - OzoneConfiguration createOzoneConfiguration(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java deleted file mode 100644 index 2f4ac4f170a83..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.cli; - -import org.apache.hadoop.hdds.utils.HddsVersionInfo; - -import picocli.CommandLine.IVersionProvider; - -/** - * Version provider for the CLI interface. - */ -public class HddsVersionProvider implements IVersionProvider { - @Override - public String[] getVersion() throws Exception { - String[] result = new String[] { - HddsVersionInfo.HDDS_VERSION_INFO.getBuildVersion() - }; - return result; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java deleted file mode 100644 index 759476579e93a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.cli; - -import picocli.CommandLine; - -/** - * Exception to throw if subcommand is not selected but required. - */ -public class MissingSubcommandException extends CommandLine.ParameterException { - - public MissingSubcommandException(CommandLine cmd) { - super(cmd, "Incomplete command"); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java deleted file mode 100644 index 8dcc1d1a3c91a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Generic helper class to make instantiate picocli based cli tools. - */ -package org.apache.hadoop.hdds.cli; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java deleted file mode 100644 index 07aa536c4e513..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.client; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -import java.util.Objects; - -/** - * BlockID of Ozone (containerID + localID + blockCommitSequenceId). - */ - -public class BlockID { - - private ContainerBlockID containerBlockID; - private long blockCommitSequenceId; - - public BlockID(long containerID, long localID) { - this(containerID, localID, 0); - } - - private BlockID(long containerID, long localID, long bcsID) { - containerBlockID = new ContainerBlockID(containerID, localID); - blockCommitSequenceId = bcsID; - } - - public BlockID(ContainerBlockID containerBlockID) { - this(containerBlockID, 0); - } - - private BlockID(ContainerBlockID containerBlockID, long bcsId) { - this.containerBlockID = containerBlockID; - blockCommitSequenceId = bcsId; - } - - public long getContainerID() { - return containerBlockID.getContainerID(); - } - - public long getLocalID() { - return containerBlockID.getLocalID(); - } - - public long getBlockCommitSequenceId() { - return blockCommitSequenceId; - } - - public void setBlockCommitSequenceId(long blockCommitSequenceId) { - this.blockCommitSequenceId = blockCommitSequenceId; - } - - public ContainerBlockID getContainerBlockID() { - return containerBlockID; - } - - public void setContainerBlockID(ContainerBlockID containerBlockID) { - this.containerBlockID = containerBlockID; - } - - @Override - public String toString() { - return new StringBuilder().append(getContainerBlockID().toString()) - .append(" bcsId: ") - .append(blockCommitSequenceId) - .toString(); - } - - public ContainerProtos.DatanodeBlockID getDatanodeBlockIDProtobuf() { - return ContainerProtos.DatanodeBlockID.newBuilder(). 
- setContainerID(containerBlockID.getContainerID()) - .setLocalID(containerBlockID.getLocalID()) - .setBlockCommitSequenceId(blockCommitSequenceId).build(); - } - - public static BlockID getFromProtobuf( - ContainerProtos.DatanodeBlockID blockID) { - return new BlockID(blockID.getContainerID(), - blockID.getLocalID(), blockID.getBlockCommitSequenceId()); - } - - public HddsProtos.BlockID getProtobuf() { - return HddsProtos.BlockID.newBuilder() - .setContainerBlockID(containerBlockID.getProtobuf()) - .setBlockCommitSequenceId(blockCommitSequenceId).build(); - } - - public static BlockID getFromProtobuf(HddsProtos.BlockID blockID) { - return new BlockID( - ContainerBlockID.getFromProtobuf(blockID.getContainerBlockID()), - blockID.getBlockCommitSequenceId()); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - BlockID blockID = (BlockID) o; - return containerBlockID.equals(blockID.getContainerBlockID()) - && blockCommitSequenceId == blockID.getBlockCommitSequenceId(); - } - - @Override - public int hashCode() { - return Objects - .hash(containerBlockID.getContainerID(), containerBlockID.getLocalID(), - blockCommitSequenceId); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java deleted file mode 100644 index 1e30cc351f91c..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.client; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -import java.util.Objects; - -/** - * BlockID returned by SCM during allocation of block (containerID + localID). - */ -public class ContainerBlockID { - private long containerID; - private long localID; - - public ContainerBlockID(long containerID, long localID) { - this.containerID = containerID; - this.localID = localID; - } - - public long getContainerID() { - return containerID; - } - - public long getLocalID() { - return localID; - } - - @Override - public String toString() { - return new StringBuffer() - .append("conID: ") - .append(containerID) - .append(" locID: ") - .append(localID).toString(); - } - - public HddsProtos.ContainerBlockID getProtobuf() { - return HddsProtos.ContainerBlockID.newBuilder(). - setContainerID(containerID).setLocalID(localID).build(); - } - - public static ContainerBlockID getFromProtobuf( - HddsProtos.ContainerBlockID containerBlockID) { - return new ContainerBlockID(containerBlockID.getContainerID(), - containerBlockID.getLocalID()); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ContainerBlockID blockID = (ContainerBlockID) o; - return containerID == blockID.containerID && localID == blockID.localID; - } - - @Override - public int hashCode() { - return Objects.hash(containerID, localID); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java deleted file mode 100644 index 59708a956b908..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.client; - -import org.apache.hadoop.ozone.OzoneConsts; - - -/** - * represents an OzoneQuota Object that can be applied to - * a storage volume. - */ -public class OzoneQuota { - - public static final String OZONE_QUOTA_BYTES = "BYTES"; - public static final String OZONE_QUOTA_MB = "MB"; - public static final String OZONE_QUOTA_GB = "GB"; - public static final String OZONE_QUOTA_TB = "TB"; - - private Units unit; - private long size; - - /** Quota Units.*/ - public enum Units {UNDEFINED, BYTES, KB, MB, GB, TB} - - /** - * Returns size. 
- * - * @return long - */ - public long getSize() { - return size; - } - - /** - * Returns Units. - * - * @return Unit in MB, GB or TB - */ - public Units getUnit() { - return unit; - } - - /** - * Constructs a default Quota object. - */ - public OzoneQuota() { - this.size = 0; - this.unit = Units.UNDEFINED; - } - - /** - * Constructor for Ozone Quota. - * - * @param size Long Size - * @param unit MB, GB or TB - */ - public OzoneQuota(long size, Units unit) { - this.size = size; - this.unit = unit; - } - - /** - * Formats a quota as a string. - * - * @param quota the quota to format - * @return string representation of quota - */ - public static String formatQuota(OzoneQuota quota) { - return String.valueOf(quota.size) + quota.unit; - } - - /** - * Parses a user provided string and returns the - * Quota Object. - * - * @param quotaString Quota String - * - * @return OzoneQuota object - * - * @throws IllegalArgumentException - */ - public static OzoneQuota parseQuota(String quotaString) - throws IllegalArgumentException { - - if ((quotaString == null) || (quotaString.isEmpty())) { - throw new IllegalArgumentException( - "Quota string cannot be null or empty."); - } - - String uppercase = quotaString.toUpperCase().replaceAll("\\s+", ""); - String size = ""; - int nSize; - Units currUnit = Units.MB; - Boolean found = false; - if (uppercase.endsWith(OZONE_QUOTA_MB)) { - size = uppercase - .substring(0, uppercase.length() - OZONE_QUOTA_MB.length()); - currUnit = Units.MB; - found = true; - } - - if (uppercase.endsWith(OZONE_QUOTA_GB)) { - size = uppercase - .substring(0, uppercase.length() - OZONE_QUOTA_GB.length()); - currUnit = Units.GB; - found = true; - } - - if (uppercase.endsWith(OZONE_QUOTA_TB)) { - size = uppercase - .substring(0, uppercase.length() - OZONE_QUOTA_TB.length()); - currUnit = Units.TB; - found = true; - } - - if (uppercase.endsWith(OZONE_QUOTA_BYTES)) { - size = uppercase - .substring(0, uppercase.length() - OZONE_QUOTA_BYTES.length()); - currUnit = Units.BYTES; - found = true; - } - - if (!found) { - throw new IllegalArgumentException( - "Quota unit not recognized. Supported values are BYTES, MB, GB and " + - "TB."); - } - - nSize = Integer.parseInt(size); - if (nSize < 0) { - throw new IllegalArgumentException("Quota cannot be negative."); - } - - return new OzoneQuota(nSize, currUnit); - } - - - /** - * Returns size in Bytes or -1 if there is no Quota. - */ - public long sizeInBytes() { - switch (this.unit) { - case BYTES: - return this.getSize(); - case MB: - return this.getSize() * OzoneConsts.MB; - case GB: - return this.getSize() * OzoneConsts.GB; - case TB: - return this.getSize() * OzoneConsts.TB; - case UNDEFINED: - default: - return -1; - } - } - - /** - * Returns OzoneQuota corresponding to size in bytes. 
- * - * @param sizeInBytes size in bytes to be converted - * - * @return OzoneQuota object - */ - public static OzoneQuota getOzoneQuota(long sizeInBytes) { - long size; - Units unit; - if (sizeInBytes % OzoneConsts.TB == 0) { - size = sizeInBytes / OzoneConsts.TB; - unit = Units.TB; - } else if (sizeInBytes % OzoneConsts.GB == 0) { - size = sizeInBytes / OzoneConsts.GB; - unit = Units.GB; - } else if (sizeInBytes % OzoneConsts.MB == 0) { - size = sizeInBytes / OzoneConsts.MB; - unit = Units.MB; - } else { - size = sizeInBytes; - unit = Units.BYTES; - } - return new OzoneQuota((int)size, unit); - } - - @Override - public String toString() { - return size + " " + unit; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java deleted file mode 100644 index 044bd6f8334cd..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.client; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -/** - * The replication factor to be used while writing key into ozone. - */ -public enum ReplicationFactor { - ONE(1), - THREE(3); - - /** - * Integer representation of replication. - */ - private int value; - - /** - * Initializes ReplicationFactor with value. - * @param value replication value - */ - ReplicationFactor(int value) { - this.value = value; - } - - /** - * Returns enum value corresponding to the int value. - * @param value replication value - * @return ReplicationFactor - */ - public static ReplicationFactor valueOf(int value) { - if(value == 1) { - return ONE; - } - if (value == 3) { - return THREE; - } - throw new IllegalArgumentException("Unsupported value: " + value); - } - - public static ReplicationFactor fromProto( - HddsProtos.ReplicationFactor replicationFactor) { - if (replicationFactor == null) { - return null; - } - switch (replicationFactor) { - case ONE: - return ReplicationFactor.ONE; - case THREE: - return ReplicationFactor.THREE; - default: - throw new IllegalArgumentException( - "Unsupported ProtoBuf replication factor: " + replicationFactor); - } - } - - /** - * Returns integer representation of ReplicationFactor. 
- * @return replication value - */ - public int getValue() { - return value; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java deleted file mode 100644 index c63896e9e1d13..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.client; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -/** - * The replication type to be used while writing key into ozone. - */ -public enum ReplicationType { - RATIS, - STAND_ALONE, - CHAINED; - - public static ReplicationType fromProto( - HddsProtos.ReplicationType replicationType) { - if (replicationType == null) { - return null; - } - switch (replicationType) { - case RATIS: - return ReplicationType.RATIS; - case STAND_ALONE: - return ReplicationType.STAND_ALONE; - case CHAINED: - return ReplicationType.CHAINED; - default: - throw new IllegalArgumentException( - "Unsupported ProtoBuf replication type: " + replicationType); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java deleted file mode 100644 index e81f134b259fe..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.client; - -/** - * Base property types for HDDS containers and replications. 
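For context on the org.apache.hadoop.hdds.client value classes deleted above (OzoneQuota, ReplicationFactor, ReplicationType), the following is a minimal usage sketch based only on the removed sources shown in this hunk; it is illustrative, not part of the patch, and the quota string and replication values are arbitrary placeholders.

import org.apache.hadoop.hdds.client.OzoneQuota;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;

public class QuotaExample {
  public static void main(String[] args) {
    // Parse a user-supplied quota string; the suffixes handled above are
    // BYTES, MB, GB and TB.
    OzoneQuota quota = OzoneQuota.parseQuota("10GB");
    System.out.println(quota + " = " + quota.sizeInBytes() + " bytes");

    // Map raw values (e.g. from protobuf or a CLI flag) onto the replication enums.
    ReplicationFactor factor = ReplicationFactor.valueOf(3);   // THREE
    ReplicationType type = ReplicationType.RATIS;
    System.out.println("replication: " + type + "/" + factor.getValue());
  }
}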
- */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java deleted file mode 100644 index 8beac1663b2b7..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java +++ /dev/null @@ -1,190 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import com.google.gson.Gson; -import java.io.IOException; -import java.io.Writer; - -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.ws.rs.core.HttpHeaders; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.http.HttpServer2; - -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY; - -/** - * A servlet to print out the running configuration data. - */ -@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) -@InterfaceStability.Unstable -public class HddsConfServlet extends HttpServlet { - - private static final long serialVersionUID = 1L; - - protected static final String FORMAT_JSON = "json"; - protected static final String FORMAT_XML = "xml"; - private static final String COMMAND = "cmd"; - private static final OzoneConfiguration OZONE_CONFIG = - new OzoneConfiguration(); - private static final transient Logger LOG = - LoggerFactory.getLogger(HddsConfServlet.class); - - - /** - * Return the Configuration of the daemon hosting this servlet. - * This is populated when the HttpServer starts. 
- */ - private Configuration getConfFromContext() { - Configuration conf = (Configuration) getServletContext().getAttribute( - HttpServer2.CONF_CONTEXT_ATTRIBUTE); - assert conf != null; - return conf; - } - - @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - - if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(), - request, response)) { - return; - } - - String format = parseAcceptHeader(request); - if (FORMAT_XML.equals(format)) { - response.setContentType("text/xml; charset=utf-8"); - } else if (FORMAT_JSON.equals(format)) { - response.setContentType("application/json; charset=utf-8"); - } - - String name = request.getParameter("name"); - Writer out = response.getWriter(); - String cmd = request.getParameter(COMMAND); - - processCommand(cmd, format, request, response, out, name); - out.close(); - } - - private void processCommand(String cmd, String format, - HttpServletRequest request, HttpServletResponse response, Writer out, - String name) - throws IOException { - try { - if (cmd == null) { - if (FORMAT_XML.equals(format)) { - response.setContentType("text/xml; charset=utf-8"); - } else if (FORMAT_JSON.equals(format)) { - response.setContentType("application/json; charset=utf-8"); - } - - writeResponse(getConfFromContext(), out, format, name); - } else { - processConfigTagRequest(request, out); - } - } catch (BadFormatException bfe) { - response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage()); - } catch (IllegalArgumentException iae) { - response.sendError(HttpServletResponse.SC_NOT_FOUND, iae.getMessage()); - } - } - - @VisibleForTesting - static String parseAcceptHeader(HttpServletRequest request) { - String format = request.getHeader(HttpHeaders.ACCEPT); - return format != null && format.contains(FORMAT_JSON) ? - FORMAT_JSON : FORMAT_XML; - } - - /** - * Guts of the servlet - extracted for easy testing. - */ - static void writeResponse(Configuration conf, - Writer out, String format, String propertyName) - throws IOException, IllegalArgumentException, BadFormatException { - if (FORMAT_JSON.equals(format)) { - Configuration.dumpConfiguration(conf, propertyName, out); - } else if (FORMAT_XML.equals(format)) { - conf.writeXml(propertyName, out); - } else { - throw new BadFormatException("Bad format: " + format); - } - } - - /** - * Exception for signal bad content type. 
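HddsConfServlet, deleted above, serves the daemon's live configuration and answers cmd=getOzoneTags and cmd=getPropertyByTag&tags=... requests, choosing JSON or XML from the Accept header. A hedged sketch of querying it over HTTP follows; the host, port, servlet path (/conf) and the OZONE tag are placeholders, since the servlet's registration point is not visible in this hunk.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ConfServletExample {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint: adjust to wherever HddsConfServlet is registered
    // on the daemon's HttpServer2 instance.
    URL url = new URL("http://localhost:9876/conf?cmd=getPropertyByTag&tags=OZONE");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // An Accept header containing "json" makes the servlet respond in JSON.
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}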
- */ - public static class BadFormatException extends Exception { - - private static final long serialVersionUID = 1L; - - public BadFormatException(String msg) { - super(msg); - } - } - - private void processConfigTagRequest(HttpServletRequest request, - Writer out) throws IOException { - String cmd = request.getParameter(COMMAND); - Gson gson = new Gson(); - Configuration config = getOzoneConfig(); - - switch (cmd) { - case "getOzoneTags": - out.write(gson.toJson(config.get(OZONE_TAGS_SYSTEM_KEY) - .split(","))); - break; - case "getPropertyByTag": - String tags = request.getParameter("tags"); - Map propMap = new HashMap<>(); - - for (String tag : tags.split(",")) { - if (config.isPropertyTag(tag)) { - Properties properties = config.getAllPropertiesByTag(tag); - propMap.put(tag, properties); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Not a valid tag" + tag); - } - } - } - out.write(gson.toJsonTree(propMap).toString()); - break; - default: - throw new IllegalArgumentException(cmd + " is not a valid command."); - } - - } - - private static Configuration getOzoneConfig() { - return OZONE_CONFIG; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java deleted file mode 100644 index c0486335cdd2a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ /dev/null @@ -1,328 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.conf; - -import javax.xml.bind.JAXBContext; -import javax.xml.bind.JAXBException; -import javax.xml.bind.Unmarshaller; -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import java.io.IOException; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.URL; -import java.util.ArrayList; -import java.util.Enumeration; -import java.util.List; -import java.util.Properties; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; - -/** - * Configuration for ozone. - */ -@InterfaceAudience.Private -public class OzoneConfiguration extends Configuration { - static { - activate(); - } - - public static OzoneConfiguration of(Configuration conf) { - Preconditions.checkNotNull(conf); - - return conf instanceof OzoneConfiguration - ? 
(OzoneConfiguration) conf - : new OzoneConfiguration(conf); - } - - public OzoneConfiguration() { - OzoneConfiguration.activate(); - loadDefaults(); - } - - public OzoneConfiguration(Configuration conf) { - super(conf); - //load the configuration from the classloader of the original conf. - setClassLoader(conf.getClassLoader()); - if (!(conf instanceof OzoneConfiguration)) { - loadDefaults(); - } - } - - private void loadDefaults() { - try { - //there could be multiple ozone-default-generated.xml files on the - // classpath, which are generated by the annotation processor. - // Here we add all of them to the list of the available configuration. - Enumeration generatedDefaults = - OzoneConfiguration.class.getClassLoader().getResources( - "ozone-default-generated.xml"); - while (generatedDefaults.hasMoreElements()) { - addResource(generatedDefaults.nextElement()); - } - } catch (IOException e) { - e.printStackTrace(); - } - addResource("ozone-site.xml"); - } - - public List readPropertyFromXml(URL url) throws JAXBException { - JAXBContext context = JAXBContext.newInstance(XMLConfiguration.class); - Unmarshaller um = context.createUnmarshaller(); - - XMLConfiguration config = (XMLConfiguration) um.unmarshal(url); - return config.getProperties(); - } - - /** - * Create a Configuration object and inject the required configuration values. - * - * @param configurationClass The class where the fields are annotated with - * the configuration. - * @return Initiated java object where the config fields are injected. - */ - public T getObject(Class configurationClass) { - - T configuration; - - try { - configuration = configurationClass.newInstance(); - } catch (InstantiationException | IllegalAccessException e) { - throw new ConfigurationException( - "Configuration class can't be created: " + configurationClass, e); - } - ConfigGroup configGroup = - configurationClass.getAnnotation(ConfigGroup.class); - String prefix = configGroup.prefix(); - - for (Method setterMethod : configurationClass.getMethods()) { - if (setterMethod.isAnnotationPresent(Config.class)) { - - String methodLocation = - configurationClass + "." + setterMethod.getName(); - - Config configAnnotation = setterMethod.getAnnotation(Config.class); - - String key = prefix + "." + configAnnotation.key(); - - Class[] parameterTypes = setterMethod.getParameterTypes(); - if (parameterTypes.length != 1) { - throw new ConfigurationException( - "@Config annotation should be used on simple setter: " - + methodLocation); - } - - ConfigType type = configAnnotation.type(); - - if (type == ConfigType.AUTO) { - type = detectConfigType(parameterTypes[0], methodLocation); - } - - //Note: default value is handled by ozone-default.xml. Here we can - //use any default. 
- try { - switch (type) { - case STRING: - setterMethod.invoke(configuration, get(key)); - break; - case INT: - setterMethod.invoke(configuration, - getInt(key, 0)); - break; - case BOOLEAN: - setterMethod.invoke(configuration, - getBoolean(key, false)); - break; - case LONG: - setterMethod.invoke(configuration, - getLong(key, 0)); - break; - case TIME: - setterMethod.invoke(configuration, - getTimeDuration(key, 0, configAnnotation.timeUnit())); - break; - default: - throw new ConfigurationException( - "Unsupported ConfigType " + type + " on " + methodLocation); - } - } catch (InvocationTargetException | IllegalAccessException e) { - throw new ConfigurationException( - "Can't inject configuration to " + methodLocation, e); - } - - } - } - return configuration; - - } - - private ConfigType detectConfigType(Class parameterType, - String methodLocation) { - ConfigType type; - if (parameterType == String.class) { - type = ConfigType.STRING; - } else if (parameterType == Integer.class || parameterType == int.class) { - type = ConfigType.INT; - } else if (parameterType == Long.class || parameterType == long.class) { - type = ConfigType.LONG; - } else if (parameterType == Boolean.class - || parameterType == boolean.class) { - type = ConfigType.BOOLEAN; - } else { - throw new ConfigurationException( - "Unsupported configuration type " + parameterType + " in " - + methodLocation); - } - return type; - } - - /** - * Class to marshall/un-marshall configuration from xml files. - */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "configuration") - public static class XMLConfiguration { - - @XmlElement(name = "property", type = Property.class) - private List properties = new ArrayList<>(); - - public XMLConfiguration() { - } - - public XMLConfiguration(List properties) { - this.properties = properties; - } - - public List getProperties() { - return properties; - } - - public void setProperties(List properties) { - this.properties = properties; - } - } - - /** - * Class to marshall/un-marshall configuration properties from xml files. 
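OzoneConfiguration, deleted above, layers ozone-default.xml, any generated defaults on the classpath, and ozone-site.xml on top of Hadoop's Configuration, and adds the annotation-driven getObject() injection. The sketch below sticks to the plain key/value side, assuming only what the removed source shows; the @Config/@ConfigGroup injection path is skipped because the annotations' full element lists are not visible in this hunk, and ozone.example.key is a made-up placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class OzoneConfExample {
  public static void main(String[] args) {
    // Loads the hdfs/ozone default resources plus ozone-site.xml from the classpath.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("ozone.example.key", "value");
    System.out.println(conf.get("ozone.example.key"));

    // Wrapping an existing Hadoop Configuration: of() reuses the instance if it
    // is already an OzoneConfiguration, otherwise it copies it and loads defaults.
    Configuration hadoopConf = new Configuration();
    OzoneConfiguration wrapped = OzoneConfiguration.of(hadoopConf);
    System.out.println(wrapped.get("ozone.example.key")); // null, separate instance
  }
}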
- */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "property") - public static class Property implements Comparable { - - private String name; - private String value; - private String tag; - private String description; - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getValue() { - return value; - } - - public void setValue(String value) { - this.value = value; - } - - public String getTag() { - return tag; - } - - public void setTag(String tag) { - this.tag = tag; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public int compareTo(Property o) { - if (this == o) { - return 0; - } - return this.getName().compareTo(o.getName()); - } - - @Override - public String toString() { - return this.getName() + " " + this.getValue() + " " + this.getTag(); - } - - @Override - public int hashCode() { - return this.getName().hashCode(); - } - - @Override - public boolean equals(Object obj) { - return (obj instanceof Property) && (((Property) obj).getName()) - .equals(this.getName()); - } - } - - public static void activate() { - // adds the default resources - Configuration.addDefaultResource("hdfs-default.xml"); - Configuration.addDefaultResource("hdfs-site.xml"); - Configuration.addDefaultResource("ozone-default.xml"); - } - - /** - * The super class method getAllPropertiesByTag - * does not override values of properties - * if there is no tag present in the configs of - * newly added resources. - * - * @param tag - * @return Properties that belong to the tag - */ - @Override - public Properties getAllPropertiesByTag(String tag) { - // Call getProps first to load the newly added resources - // before calling super.getAllPropertiesByTag - Properties updatedProps = getProps(); - Properties propertiesByTag = super.getAllPropertiesByTag(tag); - Properties props = new Properties(); - Enumeration properties = propertiesByTag.propertyNames(); - while (properties.hasMoreElements()) { - Object propertyName = properties.nextElement(); - // get the current value of the property - Object value = updatedProps.getProperty(propertyName.toString()); - if (value != null) { - props.put(propertyName, value); - } - } - return props; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java deleted file mode 100644 index 948057ebba70b..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java deleted file mode 100644 index b9d7bceb48f95..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.function; - -import com.google.protobuf.ServiceException; - -/** - * Functional interface like java.util.function.Function but with - * checked exception. - */ -@FunctionalInterface -public interface FunctionWithServiceException { - - /** - * Applies this function to the given argument. - * - * @param t the function argument - * @return the function result - */ - R apply(T t) throws ServiceException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java deleted file mode 100644 index 915fe3557e2ce..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Functional interfaces for ozone, similar to java.util.function. - */ -package org.apache.hadoop.hdds.function; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java deleted file mode 100644 index f8894e6a7e8e6..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds; - -/** - * Generic HDDS specific configurator and helper classes. 
- */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java deleted file mode 100644 index 698a443fc6b44..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ /dev/null @@ -1,493 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.protocol; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.net.NetConstants; -import org.apache.hadoop.hdds.scm.net.NodeImpl; - -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -/** - * DatanodeDetails class contains details about DataNode like: - * - UUID of the DataNode. - * - IP and Hostname details. - * - Port details to which the DataNode will be listening. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class DatanodeDetails extends NodeImpl implements - Comparable { -/** - * DataNode's unique identifier in the cluster. - */ - private final UUID uuid; - - private String ipAddress; - private String hostName; - private List ports; - private String certSerialId; - - /** - * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used - * for instantiating DatanodeDetails. - * @param uuid DataNode's UUID - * @param ipAddress IP Address of this DataNode - * @param hostName DataNode's hostname - * @param networkLocation DataNode's network location path - * @param ports Ports used by the DataNode - * @param certSerialId serial id from SCM issued certificate. - */ - private DatanodeDetails(String uuid, String ipAddress, String hostName, - String networkLocation, List ports, String certSerialId) { - super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT); - this.uuid = UUID.fromString(uuid); - this.ipAddress = ipAddress; - this.hostName = hostName; - this.ports = ports; - this.certSerialId = certSerialId; - } - - protected DatanodeDetails(DatanodeDetails datanodeDetails) { - super(datanodeDetails.getHostName(), datanodeDetails.getNetworkLocation(), - datanodeDetails.getCost()); - this.uuid = datanodeDetails.uuid; - this.ipAddress = datanodeDetails.ipAddress; - this.hostName = datanodeDetails.hostName; - this.ports = datanodeDetails.ports; - this.setNetworkName(datanodeDetails.getNetworkName()); - } - - /** - * Returns the DataNode UUID. 
- * - * @return UUID of DataNode - */ - public UUID getUuid() { - return uuid; - } - - /** - * Returns the string representation of DataNode UUID. - * - * @return UUID of DataNode - */ - public String getUuidString() { - return uuid.toString(); - } - - /** - * Sets the IP address of Datanode. - * - * @param ip IP Address - */ - public void setIpAddress(String ip) { - this.ipAddress = ip; - } - - /** - * Returns IP address of DataNode. - * - * @return IP address - */ - public String getIpAddress() { - return ipAddress; - } - - /** - * Sets the Datanode hostname. - * - * @param host hostname - */ - public void setHostName(String host) { - this.hostName = host; - } - - /** - * Returns Hostname of DataNode. - * - * @return Hostname - */ - public String getHostName() { - return hostName; - } - - /** - * Sets a DataNode Port. - * - * @param port DataNode port - */ - public void setPort(Port port) { - // If the port is already in the list remove it first and add the - // new/updated port value. - ports.remove(port); - ports.add(port); - } - - /** - * Returns all the Ports used by DataNode. - * - * @return DataNode Ports - */ - public List getPorts() { - return ports; - } - - /** - * Given the name returns port number, null if the asked port is not found. - * - * @param name Name of the port - * - * @return Port - */ - public Port getPort(Port.Name name) { - for (Port port : ports) { - if (port.getName().equals(name)) { - return port; - } - } - return null; - } - - /** - * Returns a DatanodeDetails from the protocol buffers. - * - * @param datanodeDetailsProto - protoBuf Message - * @return DatanodeDetails - */ - public static DatanodeDetails getFromProtoBuf( - HddsProtos.DatanodeDetailsProto datanodeDetailsProto) { - DatanodeDetails.Builder builder = newBuilder(); - builder.setUuid(datanodeDetailsProto.getUuid()); - if (datanodeDetailsProto.hasIpAddress()) { - builder.setIpAddress(datanodeDetailsProto.getIpAddress()); - } - if (datanodeDetailsProto.hasHostName()) { - builder.setHostName(datanodeDetailsProto.getHostName()); - } - if (datanodeDetailsProto.hasCertSerialId()) { - builder.setCertSerialId(datanodeDetailsProto.getCertSerialId()); - } - for (HddsProtos.Port port : datanodeDetailsProto.getPortsList()) { - builder.addPort(newPort( - Port.Name.valueOf(port.getName().toUpperCase()), port.getValue())); - } - if (datanodeDetailsProto.hasNetworkName()) { - builder.setNetworkName(datanodeDetailsProto.getNetworkName()); - } - if (datanodeDetailsProto.hasNetworkLocation()) { - builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation()); - } - return builder.build(); - } - - /** - * Returns a DatanodeDetails protobuf message from a datanode ID. 
- * @return HddsProtos.DatanodeDetailsProto - */ - public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { - HddsProtos.DatanodeDetailsProto.Builder builder = - HddsProtos.DatanodeDetailsProto.newBuilder() - .setUuid(getUuidString()); - if (ipAddress != null) { - builder.setIpAddress(ipAddress); - } - if (hostName != null) { - builder.setHostName(hostName); - } - if (certSerialId != null) { - builder.setCertSerialId(certSerialId); - } - if (!Strings.isNullOrEmpty(getNetworkName())) { - builder.setNetworkName(getNetworkName()); - } - if (!Strings.isNullOrEmpty(getNetworkLocation())) { - builder.setNetworkLocation(getNetworkLocation()); - } - - for (Port port : ports) { - builder.addPorts(HddsProtos.Port.newBuilder() - .setName(port.getName().toString()) - .setValue(port.getValue()) - .build()); - } - return builder.build(); - } - - @Override - public String toString() { - return uuid.toString() + "{" + - "ip: " + - ipAddress + - ", host: " + - hostName + - ", networkLocation: " + - getNetworkLocation() + - ", certSerialId: " + certSerialId + - "}"; - } - - @Override - public int compareTo(DatanodeDetails that) { - return this.getUuid().compareTo(that.getUuid()); - } - - @Override - public boolean equals(Object obj) { - return obj instanceof DatanodeDetails && - uuid.equals(((DatanodeDetails) obj).uuid); - } - - @Override - public int hashCode() { - return uuid.hashCode(); - } - - /** - * Returns DatanodeDetails.Builder instance. - * - * @return DatanodeDetails.Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Builder class for building DatanodeDetails. - */ - public static final class Builder { - private String id; - private String ipAddress; - private String hostName; - private String networkName; - private String networkLocation; - private List ports; - private String certSerialId; - - /** - * Default private constructor. To create Builder instance use - * DatanodeDetails#newBuilder. - */ - private Builder() { - ports = new ArrayList<>(); - } - - /** - * Sets the DatanodeUuid. - * - * @param uuid DatanodeUuid - * @return DatanodeDetails.Builder - */ - public Builder setUuid(String uuid) { - this.id = uuid; - return this; - } - - /** - * Sets the IP address of DataNode. - * - * @param ip address - * @return DatanodeDetails.Builder - */ - public Builder setIpAddress(String ip) { - this.ipAddress = ip; - return this; - } - - /** - * Sets the hostname of DataNode. - * - * @param host hostname - * @return DatanodeDetails.Builder - */ - public Builder setHostName(String host) { - this.hostName = host; - return this; - } - - /** - * Sets the network name of DataNode. - * - * @param name network name - * @return DatanodeDetails.Builder - */ - public Builder setNetworkName(String name) { - this.networkName = name; - return this; - } - - /** - * Sets the network location of DataNode. - * - * @param loc location - * @return DatanodeDetails.Builder - */ - public Builder setNetworkLocation(String loc) { - this.networkLocation = loc; - return this; - } - - /** - * Adds a DataNode Port. - * - * @param port DataNode port - * - * @return DatanodeDetails.Builder - */ - public Builder addPort(Port port) { - this.ports.add(port); - return this; - } - - /** - * Adds certificate serial id. - * - * @param certId Serial id of SCM issued certificate. - * - * @return DatanodeDetails.Builder - */ - public Builder setCertSerialId(String certId) { - this.certSerialId = certId; - return this; - } - - /** - * Builds and returns DatanodeDetails instance. 
- * - * @return DatanodeDetails - */ - public DatanodeDetails build() { - Preconditions.checkNotNull(id); - if (networkLocation == null) { - networkLocation = NetConstants.DEFAULT_RACK; - } - DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName, - networkLocation, ports, certSerialId); - if (networkName != null) { - dn.setNetworkName(networkName); - } - return dn; - } - } - - /** - * Constructs a new Port with name and value. - * - * @param name Name of the port - * @param value Port number - * - * @return {@code Port} instance - */ - public static Port newPort(Port.Name name, Integer value) { - return new Port(name, value); - } - - /** - * Container to hold DataNode Port details. - */ - public static final class Port { - - /** - * Ports that are supported in DataNode. - */ - public enum Name { - STANDALONE, RATIS, REST - } - - private Name name; - private Integer value; - - /** - * Private constructor for constructing Port object. Use - * DatanodeDetails#newPort to create a new Port object. - * - * @param name - * @param value - */ - private Port(Name name, Integer value) { - this.name = name; - this.value = value; - } - - /** - * Returns the name of the port. - * - * @return Port name - */ - public Name getName() { - return name; - } - - /** - * Returns the port number. - * - * @return Port number - */ - public Integer getValue() { - return value; - } - - @Override - public int hashCode() { - return name.hashCode(); - } - - /** - * Ports are considered equal if they have the same name. - * - * @param anObject - * The object to compare this {@code Port} against - * @return {@code true} if the given object represents a {@code Port} - and has the same name, {@code false} otherwise - */ - @Override - public boolean equals(Object anObject) { - if (this == anObject) { - return true; - } - if (anObject instanceof Port) { - return name.equals(((Port) anObject).name); - } - return false; - } - } - - /** - * Returns serial id of SCM issued certificate. - * - * @return certificate serial id - */ - public String getCertSerialId() { - return certSerialId; - } - - /** - * Set certificate serial id of SCM issued certificate. - * - */ - public void setCertSerialId(String certSerialId) { - this.certSerialId = certSerialId; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java deleted file mode 100644 index 4036cb17b8477..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
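DatanodeDetails, deleted above, is constructed through its Builder and round-trips through HddsProtos.DatanodeDetailsProto. A minimal sketch based on the removed source; the UUID, addresses and port numbers are placeholders rather than the project's defaults.

import java.util.UUID;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public class DatanodeDetailsExample {
  public static void main(String[] args) {
    DatanodeDetails dn = DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID().toString())
        .setIpAddress("10.0.0.1")
        .setHostName("dn1.example.com")
        .addPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 9859))
        .addPort(DatanodeDetails.newPort(Port.Name.RATIS, 9858))
        .build();

    // Round-trip through the protobuf representation used on the wire.
    HddsProtos.DatanodeDetailsProto proto = dn.getProtoBufMessage();
    DatanodeDetails restored = DatanodeDetails.getFromProtoBuf(proto);
    System.out.println(restored + " standalone port: "
        + restored.getPort(Port.Name.STANDALONE).getValue());
  }
}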
- */ -package org.apache.hadoop.hdds.protocol; - -import java.io.IOException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.security.KerberosInfo; - -/** - * The protocol used to perform security related operations with SCM. - */ -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -@InterfaceAudience.Private -public interface SCMSecurityProtocol { - - @SuppressWarnings("checkstyle:ConstantName") - /** - * Version 1: Initial version. - */ - long versionID = 1L; - - /** - * Get SCM signed certificate for DataNode. - * - * @param dataNodeDetails - DataNode Details. - * @param certSignReq - Certificate signing request. - * @return byte[] - SCM signed certificate. - */ - String getDataNodeCertificate( - DatanodeDetailsProto dataNodeDetails, - String certSignReq) throws IOException; - - /** - * Get SCM signed certificate for OM. - * - * @param omDetails - DataNode Details. - * @param certSignReq - Certificate signing request. - * @return String - pem encoded SCM signed - * certificate. - */ - String getOMCertificate(OzoneManagerDetailsProto omDetails, - String certSignReq) throws IOException; - - /** - * Get SCM signed certificate for given certificate serial id if it exists. - * Throws exception if it's not found. - * - * @param certSerialId - Certificate serial id. - * @return String - pem encoded SCM signed - * certificate with given cert id if it - * exists. - */ - String getCertificate(String certSerialId) throws IOException; - - /** - * Get CA certificate. - * - * @return String - pem encoded CA certificate. - */ - String getCACertificate() throws IOException; - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java deleted file mode 100644 index 7dae0fce02cca..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains HDDS protocol related classes. 
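A hedged sketch of how a caller uses the SCMSecurityProtocol interface above; scmSecurityClient (any implementation of the interface), dnProto (the datanode's HddsProtos.DatanodeDetailsProto), csrPem (a PEM-encoded certificate signing request) and dnCertSerialId (the serial id of a previously issued certificate) are assumed inputs:

    // Each call returns a PEM-encoded certificate issued by the SCM CA.
    String dnCertPem = scmSecurityClient.getDataNodeCertificate(dnProto, csrPem);
    String caCertPem = scmSecurityClient.getCACertificate();
    String byIdPem   = scmSecurityClient.getCertificate(dnCertSerialId);   // throws if the id is unknown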
- */ -package org.apache.hadoop.hdds.protocol; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java deleted file mode 100644 index efe79a76f31dd..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java +++ /dev/null @@ -1,213 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.protocolPB; - -import java.io.Closeable; -import java.io.IOException; -import java.util.function.Consumer; - -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest.Builder; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Type; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ipc.RPC; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import static org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto; - -/** - * This class is the client-side translator that forwards requests for - * {@link SCMSecurityProtocol} to the {@link SCMSecurityProtocolPB} proxy. - */ -public class SCMSecurityProtocolClientSideTranslatorPB implements - SCMSecurityProtocol, ProtocolTranslator, Closeable { - - /** - * RpcController is not used and hence is set to null. - */ - private static final RpcController NULL_RPC_CONTROLLER = null; - private final SCMSecurityProtocolPB rpcProxy; - - public SCMSecurityProtocolClientSideTranslatorPB( - SCMSecurityProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; - } - - /** - * Helper method to wrap the request and send the message. - */ - private SCMSecurityResponse submitRequest( - SCMSecurityProtocolProtos.Type type, - Consumer builderConsumer) throws IOException { - final SCMSecurityResponse response; - try { - - Builder builder = SCMSecurityRequest.newBuilder() - .setCmdType(type) - .setTraceID(TracingUtil.exportCurrentSpan()); - builderConsumer.accept(builder); - SCMSecurityRequest wrapper = builder.build(); - - response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper); - } catch (ServiceException ex) { - throw ProtobufHelper.getRemoteException(ex); - } - return response; - } - - /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - * - *
As noted in {@link AutoCloseable#close()}, cases where the - * close may fail require careful attention. It is strongly advised - * to relinquish the underlying resources and to internally - * mark the {@code Closeable} as closed, prior to throwing - * the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - RPC.stopProxy(rpcProxy); - } - - /** - * Get SCM signed certificate for DataNode. - * - * @param dataNodeDetails - DataNode Details. - * @param certSignReq - Certificate signing request. - * @return byte[] - SCM signed certificate. - */ - @Override - public String getDataNodeCertificate(DatanodeDetailsProto dataNodeDetails, - String certSignReq) throws IOException { - return getDataNodeCertificateChain(dataNodeDetails, certSignReq) - .getX509Certificate(); - } - - /** - * Get SCM signed certificate for OM. - * - * @param omDetails - OzoneManager Details. - * @param certSignReq - Certificate signing request. - * @return byte[] - SCM signed certificate. - */ - @Override - public String getOMCertificate(OzoneManagerDetailsProto omDetails, - String certSignReq) throws IOException { - return getOMCertChain(omDetails, certSignReq).getX509Certificate(); - } - - /** - * Get SCM signed certificate for OM. - * - * @param omDetails - OzoneManager Details. - * @param certSignReq - Certificate signing request. - * @return byte[] - SCM signed certificate. - */ - public SCMGetCertResponseProto getOMCertChain( - OzoneManagerDetailsProto omDetails, String certSignReq) - throws IOException { - SCMGetOMCertRequestProto request = SCMGetOMCertRequestProto - .newBuilder() - .setCSR(certSignReq) - .setOmDetails(omDetails) - .build(); - return submitRequest(Type.GetOMCertificate, - builder -> builder.setGetOMCertRequest(request)) - .getGetCertResponseProto(); - } - - /** - * Get SCM signed certificate with given serial id. Throws exception if - * certificate is not found. - * - * @param certSerialId - Certificate serial id. - * @return string - pem encoded certificate. - */ - @Override - public String getCertificate(String certSerialId) throws IOException { - SCMGetCertificateRequestProto request = SCMGetCertificateRequestProto - .newBuilder() - .setCertSerialId(certSerialId) - .build(); - return submitRequest(Type.GetCertificate, - builder -> builder.setGetCertificateRequest(request)) - .getGetCertResponseProto() - .getX509Certificate(); - } - - /** - * Get SCM signed certificate for Datanode. - * - * @param dnDetails - Datanode Details. - * @param certSignReq - Certificate signing request. - * @return byte[] - SCM signed certificate. - */ - public SCMGetCertResponseProto getDataNodeCertificateChain( - DatanodeDetailsProto dnDetails, String certSignReq) - throws IOException { - - SCMGetDataNodeCertRequestProto request = - SCMGetDataNodeCertRequestProto.newBuilder() - .setCSR(certSignReq) - .setDatanodeDetails(dnDetails) - .build(); - return submitRequest(Type.GetDataNodeCertificate, - builder -> builder.setGetDataNodeCertRequest(request)) - .getGetCertResponseProto(); - } - - /** - * Get CA certificate. - * - * @return serial - Root certificate. 
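One plausible way to obtain the translator above over Hadoop RPC, sketched under the assumption that conf and scmSecurityAddress (an InetSocketAddress) are already prepared and that Kerberos settings are handled elsewhere; this is not the project's actual factory code:

    RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, ProtobufRpcEngine.class);
    SCMSecurityProtocolPB proxy = RPC.getProxy(SCMSecurityProtocolPB.class,
        SCMSecurityProtocol.versionID, scmSecurityAddress, conf);
    SCMSecurityProtocolClientSideTranslatorPB client =
        new SCMSecurityProtocolClientSideTranslatorPB(proxy);
    String caCertPem = client.getCACertificate();
    client.close();   // releases the underlying RPC proxy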
- */ - @Override - public String getCACertificate() throws IOException { - SCMGetCACertificateRequestProto protoIns = SCMGetCACertificateRequestProto - .getDefaultInstance(); - return submitRequest(Type.GetCACertificate, - builder -> builder.setGetCACertificateRequest(protoIns)) - .getGetCertResponseProto().getX509Certificate(); - - } - - /** - * Return the proxy object underlying this protocol translator. - * - * @return the proxy object underlying this protocol translator. - */ - @Override - public Object getUnderlyingProxyObject() { - return rpcProxy; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java deleted file mode 100644 index 41b0332d6d3cd..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.protocolPB; - -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityProtocolService; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.security.KerberosInfo; - -/** - * Protocol for security related operations on SCM. - */ - -@ProtocolInfo(protocolName = - "org.apache.hadoop.hdds.protocol.SCMSecurityProtocol", - protocolVersion = 1) -@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -public interface SCMSecurityProtocolPB extends - SCMSecurityProtocolService.BlockingInterface { - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java deleted file mode 100644 index 44960194f075a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.protocolPB; -/** - * This package contains classes for wiring HDDS protobuf calls to rpc. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java deleted file mode 100644 index 07a886a0f9c0d..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
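For orientation only: exposing a *PB interface such as SCMSecurityProtocolPB above on the server side usually follows the standard Hadoop RPC pattern sketched below. The serverSideTranslator, bind address and handler count are placeholders; the real SCM wiring lives in the server code, not in this file:

    BlockingService securityService = SCMSecurityProtocolService
        .newReflectiveBlockingService(serverSideTranslator);   // hypothetical SCMSecurityProtocolPB impl
    RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, ProtobufRpcEngine.class);
    RPC.Server rpcServer = new RPC.Builder(conf)
        .setProtocol(SCMSecurityProtocolPB.class)
        .setInstance(securityService)
        .setBindAddress("0.0.0.0")
        .setPort(9961)          // matches OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT further down
        .setNumHandlers(2)
        .build();
    rpcServer.start();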
- */ -package org.apache.hadoop.hdds.ratis; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.ratis.util.JavaUtils; - -import java.util.Objects; -import java.util.function.Supplier; - -/** - * Implementing the {@link Message} interface - * for {@link ContainerCommandRequestProto}. - */ -public final class ContainerCommandRequestMessage implements Message { - public static ContainerCommandRequestMessage toMessage( - ContainerCommandRequestProto request, String traceId) { - final ContainerCommandRequestProto.Builder b - = ContainerCommandRequestProto.newBuilder(request); - if (traceId != null) { - b.setTraceID(traceId); - } - - ByteString data = ByteString.EMPTY; - if (request.getCmdType() == Type.WriteChunk) { - final WriteChunkRequestProto w = request.getWriteChunk(); - data = w.getData(); - b.setWriteChunk(w.toBuilder().clearData()); - } else if (request.getCmdType() == Type.PutSmallFile) { - final PutSmallFileRequestProto p = request.getPutSmallFile(); - data = p.getData(); - b.setPutSmallFile(p.toBuilder().setData(ByteString.EMPTY)); - } - return new ContainerCommandRequestMessage(b.build(), data); - } - - public static ContainerCommandRequestProto toProto( - ByteString bytes, RaftGroupId groupId) - throws InvalidProtocolBufferException { - final int i = 4 + bytes.asReadOnlyByteBuffer().getInt(); - final ContainerCommandRequestProto header - = ContainerCommandRequestProto.parseFrom(bytes.substring(4, i)); - // TODO: setting pipeline id can be avoided if the client is sending it. - // In such case, just have to validate the pipeline id. 
- final ContainerCommandRequestProto.Builder b = header.toBuilder(); - if (groupId != null) { - b.setPipelineID(groupId.getUuid().toString()); - } - final ByteString data = bytes.substring(i); - if (header.getCmdType() == Type.WriteChunk) { - b.setWriteChunk(b.getWriteChunkBuilder().setData(data)); - } else if (header.getCmdType() == Type.PutSmallFile) { - b.setPutSmallFile(b.getPutSmallFileBuilder().setData(data)); - } - return b.build(); - } - - private final ContainerCommandRequestProto header; - private final ByteString data; - private final Supplier contentSupplier - = JavaUtils.memoize(this::buildContent); - - private ContainerCommandRequestMessage( - ContainerCommandRequestProto header, ByteString data) { - this.header = Objects.requireNonNull(header, "header == null"); - this.data = Objects.requireNonNull(data, "data == null"); - } - - private ByteString buildContent() { - final ByteString headerBytes = header.toByteString(); - return RatisHelper.int2ByteString(headerBytes.size()) - .concat(headerBytes) - .concat(data); - } - - @Override - public ByteString getContent() { - return contentSupplier.get(); - } - - @Override - public String toString() { - return header + ", data.size=" + data.size(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java deleted file mode 100644 index 081b4fb766be8..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ /dev/null @@ -1,290 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
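To make the framing used by ContainerCommandRequestMessage above concrete: getContent() emits a 4-byte header length, then the header proto with the chunk data removed, then the raw data, and toProto() reverses exactly that. A short round-trip sketch, where request, traceId and raftGroupId are assumed to exist:

    ContainerCommandRequestMessage msg =
        ContainerCommandRequestMessage.toMessage(request, traceId);
    ByteString wire = msg.getContent();   // [int header length][header proto][chunk data]
    ContainerCommandRequestProto parsed =
        ContainerCommandRequestMessage.toProto(wire, raftGroupId);

Keeping the bulk data outside the header means the potentially large chunk payload travels as raw bytes instead of being embedded in the protobuf message.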
- */ - -package org.apache.hadoop.hdds.ratis; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; - -import org.apache.ratis.RaftConfigKeys; -import org.apache.ratis.client.RaftClient; -import org.apache.ratis.client.RaftClientConfigKeys; -import org.apache.ratis.conf.RaftProperties; -import org.apache.ratis.grpc.GrpcConfigKeys; -import org.apache.ratis.grpc.GrpcFactory; -import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.protocol.RaftGroup; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.protocol.RaftPeer; -import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.retry.RetryPolicies; -import org.apache.ratis.retry.RetryPolicy; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.util.SizeInBytes; -import org.apache.ratis.util.TimeDuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Ratis helper methods. - */ -public interface RatisHelper { - Logger LOG = LoggerFactory.getLogger(RatisHelper.class); - - static String toRaftPeerIdString(DatanodeDetails id) { - return id.getUuidString(); - } - - static UUID toDatanodeId(String peerIdString) { - return UUID.fromString(peerIdString); - } - - static UUID toDatanodeId(RaftPeerId peerId) { - return toDatanodeId(peerId.toString()); - } - - static UUID toDatanodeId(RaftProtos.RaftPeerProto peerId) { - return toDatanodeId(RaftPeerId.valueOf(peerId.getId())); - } - - static String toRaftPeerAddressString(DatanodeDetails id) { - return id.getIpAddress() + ":" + - id.getPort(DatanodeDetails.Port.Name.RATIS).getValue(); - } - - static RaftPeerId toRaftPeerId(DatanodeDetails id) { - return RaftPeerId.valueOf(toRaftPeerIdString(id)); - } - - static RaftPeer toRaftPeer(DatanodeDetails id) { - return new RaftPeer(toRaftPeerId(id), toRaftPeerAddressString(id)); - } - - static List toRaftPeers(Pipeline pipeline) { - return toRaftPeers(pipeline.getNodes()); - } - - static List toRaftPeers( - List datanodes) { - return datanodes.stream().map(RatisHelper::toRaftPeer) - .collect(Collectors.toList()); - } - - /* TODO: use a dummy id for all groups for the moment. - * It should be changed to a unique id for each group. 
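A hedged sketch of building a Ratis client for a pipeline through the newRaftClient overload declared a little further down in this interface; pipeline is assumed to exist, and the retry, outstanding-request and timeout values are illustrative (they happen to mirror the defaults used elsewhere in this patch):

    RetryPolicy retryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        180, TimeDuration.valueOf(1, TimeUnit.SECONDS));
    RaftClient client = RatisHelper.newRaftClient(SupportedRpcType.GRPC, pipeline,
        retryPolicy, 100, null /* no TLS */, TimeDuration.valueOf(3, TimeUnit.SECONDS));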
- */ - RaftGroupId DUMMY_GROUP_ID = - RaftGroupId.valueOf(ByteString.copyFromUtf8("AOzoneRatisGroup")); - - RaftGroup EMPTY_GROUP = RaftGroup.valueOf(DUMMY_GROUP_ID, - Collections.emptyList()); - - static RaftGroup emptyRaftGroup() { - return EMPTY_GROUP; - } - - static RaftGroup newRaftGroup(Collection peers) { - return peers.isEmpty()? emptyRaftGroup() - : RaftGroup.valueOf(DUMMY_GROUP_ID, peers); - } - - static RaftGroup newRaftGroup(RaftGroupId groupId, - Collection peers) { - final List newPeers = peers.stream() - .map(RatisHelper::toRaftPeer) - .collect(Collectors.toList()); - return peers.isEmpty() ? RaftGroup.valueOf(groupId, Collections.emptyList()) - : RaftGroup.valueOf(groupId, newPeers); - } - - static RaftGroup newRaftGroup(Pipeline pipeline) { - return RaftGroup.valueOf(RaftGroupId.valueOf(pipeline.getId().getId()), - toRaftPeers(pipeline)); - } - - static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline, - RetryPolicy retryPolicy, int maxOutStandingRequest, - GrpcTlsConfig tlsConfig, TimeDuration timeout) throws IOException { - return newRaftClient(rpcType, toRaftPeerId(pipeline.getFirstNode()), - newRaftGroup(RaftGroupId.valueOf(pipeline.getId().getId()), - pipeline.getNodes()), retryPolicy, maxOutStandingRequest, tlsConfig, - timeout); - } - - static TimeDuration getClientRequestTimeout(Configuration conf) { - // Set the client requestTimeout - final TimeUnit timeUnit = - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT - .getUnit(); - final long duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT - .getDuration(), timeUnit); - final TimeDuration clientRequestTimeout = - TimeDuration.valueOf(duration, timeUnit); - return clientRequestTimeout; - } - - static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, - RetryPolicy retryPolicy, int maxOutstandingRequests, - GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) { - return newRaftClient(rpcType, leader.getId(), - newRaftGroup(new ArrayList<>(Arrays.asList(leader))), retryPolicy, - maxOutstandingRequests, tlsConfig, clientRequestTimeout); - } - - static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, - RetryPolicy retryPolicy, int maxOutstandingRequests, - TimeDuration clientRequestTimeout) { - return newRaftClient(rpcType, leader.getId(), - newRaftGroup(new ArrayList<>(Arrays.asList(leader))), retryPolicy, - maxOutstandingRequests, null, clientRequestTimeout); - } - - static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader, - RaftGroup group, RetryPolicy retryPolicy, int maxOutStandingRequest, - GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) { - if (LOG.isTraceEnabled()) { - LOG.trace("newRaftClient: {}, leader={}, group={}", - rpcType, leader, group); - } - final RaftProperties properties = new RaftProperties(); - RaftConfigKeys.Rpc.setType(properties, rpcType); - RaftClientConfigKeys.Rpc - .setRequestTimeout(properties, clientRequestTimeout); - - GrpcConfigKeys.setMessageSizeMax(properties, - SizeInBytes.valueOf(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)); - GrpcConfigKeys.OutputStream.setOutstandingAppendsMax(properties, - maxOutStandingRequest); - - RaftClient.Builder builder = RaftClient.newBuilder() - .setRaftGroup(group) - .setLeaderId(leader) - .setProperties(properties) - .setRetryPolicy(retryPolicy); - - // TODO: GRPC TLS only for now, netty/hadoop RPC TLS support later. 
- if (tlsConfig != null && rpcType == SupportedRpcType.GRPC) { - builder.setParameters(GrpcFactory.newRaftParameters(tlsConfig)); - } - return builder.build(); - } - - // For External gRPC client to server with gRPC TLS. - // No mTLS for external client as SCM CA does not issued certificates for them - static GrpcTlsConfig createTlsClientConfig(SecurityConfig conf, - X509Certificate caCert) { - GrpcTlsConfig tlsConfig = null; - if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) { - tlsConfig = new GrpcTlsConfig(null, null, - caCert, false); - } - return tlsConfig; - } - - // For Internal gRPC client from SCM to DN with gRPC TLS - static GrpcTlsConfig createTlsClientConfigForSCM(SecurityConfig conf, - CertificateServer certificateServer) throws IOException { - if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) { - try { - X509Certificate caCert = - CertificateCodec.getX509Certificate( - certificateServer.getCACertificate()); - return new GrpcTlsConfig(null, null, - caCert, false); - } catch (CertificateException ex) { - throw new SCMSecurityException("Fail to find SCM CA certificate.", ex); - } - } - return null; - } - - // For gRPC server running DN container service with gPRC TLS - // No mTLS as the channel is shared for for external client, which - // does not have SCM CA issued certificates. - // In summary: - // authenticate from server to client is via TLS. - // authenticate from client to server is via block token (or container token). - static GrpcTlsConfig createTlsServerConfigForDN(SecurityConfig conf, - CertificateClient caClient) { - if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) { - return new GrpcTlsConfig( - caClient.getPrivateKey(), caClient.getCertificate(), - null, false); - } - return null; - } - - static RetryPolicy createRetryPolicy(Configuration conf) { - int maxRetryCount = - conf.getInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, - OzoneConfigKeys. - DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT); - long retryInterval = conf.getTimeDuration(OzoneConfigKeys. - DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, OzoneConfigKeys. - DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT - .toIntExact(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS); - TimeDuration sleepDuration = - TimeDuration.valueOf(retryInterval, TimeUnit.MILLISECONDS); - RetryPolicy retryPolicy = RetryPolicies - .retryUpToMaximumCountWithFixedSleep(maxRetryCount, sleepDuration); - return retryPolicy; - } - - static Long getMinReplicatedIndex( - Collection commitInfos) { - return commitInfos.stream().map(RaftProtos.CommitInfoProto::getCommitIndex) - .min(Long::compareTo).orElse(null); - } - - static ByteString int2ByteString(int n) { - final ByteString.Output out = ByteString.newOutput(); - try(DataOutputStream dataOut = new DataOutputStream(out)) { - dataOut.writeInt(n); - } catch (IOException e) { - throw new IllegalStateException( - "Failed to write integer n = " + n + " to a ByteString.", e); - } - return out.toByteString(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java deleted file mode 100644 index e52dc7ffc70bb..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.ratis; - -/** - * This package contains classes related to Apache Ratis. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java deleted file mode 100644 index 4608df7612287..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; - -import java.nio.ByteBuffer; -import java.util.function.Function; - -/** - * Helper class to create a conversion function from ByteBuffer to ByteString - * based on the property - * {@link OzoneConfigKeys#OZONE_UNSAFEBYTEOPERATIONS_ENABLED} in the - * Ozone configuration. - */ -public final class ByteStringConversion { - private ByteStringConversion(){} // no instantiation. - - /** - * Creates the conversion function to be used to convert ByteBuffers to - * ByteString instances to be used in protobuf messages. - * - * @param config the Ozone configuration - * @return the conversion function defined by - * {@link OzoneConfigKeys#OZONE_UNSAFEBYTEOPERATIONS_ENABLED} - * @see
- *      ByteBuffer
- */ - public static Function createByteBufferConversion( - Configuration config){ - boolean unsafeEnabled = - config!=null && config.getBoolean( - OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED, - OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT); - if (unsafeEnabled) { - return buffer -> UnsafeByteOperations.unsafeWrap(buffer); - } else { - return buffer -> { - ByteString retval = ByteString.copyFrom(buffer); - buffer.flip(); - return retval; - }; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java deleted file mode 100644 index 161780668ab0c..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ /dev/null @@ -1,375 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.ratis.proto.RaftProtos.ReplicationLevel; -import org.apache.ratis.util.TimeDuration; - -import java.util.concurrent.TimeUnit; - -/** - * This class contains constants for configuration keys used in SCM. - */ -@InterfaceAudience.Public -@InterfaceStability.Unstable -public final class ScmConfigKeys { - - // Location of SCM DB files. For now we just support a single - // metadata dir but in future we may support multiple for redundancy or - // performance. 
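Using the ByteStringConversion helper above is a one-liner; a sketch, where conf is any Hadoop Configuration and chunk is the ByteBuffer to ship in a protobuf message:

    Function<ByteBuffer, ByteString> toByteString =
        ByteStringConversion.createByteBufferConversion(conf);
    ByteString payload = toByteString.apply(chunk);   // wraps without copying when unsafe ops are enabled, otherwise copies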
- public static final String OZONE_SCM_DB_DIRS = "ozone.scm.db.dirs"; - - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = "dfs.container.ratis.enabled"; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT - = false; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = "dfs.container.ratis.rpc.type"; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT - = "GRPC"; - public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY - = "dfs.container.ratis.num.write.chunk.threads"; - public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT - = 60; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = "dfs.container.ratis.replication.level"; - public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = "dfs.container.ratis.num.container.op.executors"; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT - = 10; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = - "dfs.container.ratis.segment.size"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = - "1MB"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = - "dfs.container.ratis.segment.preallocated.size"; - public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "16KB"; - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - "dfs.container.ratis.statemachinedata.sync.timeout"; - public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = - TimeDuration.valueOf(10, TimeUnit.SECONDS); - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - "dfs.container.ratis.statemachinedata.sync.retries"; - public static final int - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1; - public static final String - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = - "dfs.container.ratis.statemachine.max.pending.apply-transactions"; - // The default value of maximum number of pending state machine apply - // transactions is kept same as default snapshot threshold. - public static final int - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = - 100000; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.queue.num-elements"; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = - 1024; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.queue.byte-limit"; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = - "4GB"; - public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.appender.queue.num-elements"; - public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.appender.queue.byte-limit"; - public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - "dfs.container.ratis.log.purge.gap"; - // TODO: Set to 1024 once RATIS issue around purge is fixed. 
- public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = - 1000000; - - public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS = - "dfs.container.ratis.leader.num.pending.requests"; - public static final int - DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT = 4096; - // expiry interval stateMachineData cache entry inside containerStateMachine - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL = - "dfs.container.ratis.statemachine.cache.expiry.interval"; - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT = - "10s"; - public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY = - "dfs.ratis.client.request.timeout.duration"; - public static final TimeDuration - DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT = - TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS); - public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY = - "dfs.ratis.client.request.max.retries"; - public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT = 180; - public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY = - "dfs.ratis.client.request.retry.interval"; - public static final TimeDuration - DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT = - TimeDuration.valueOf(1000, TimeUnit.MILLISECONDS); - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - "dfs.ratis.server.retry-cache.timeout.duration"; - public static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = - TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS); - public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY = - "dfs.ratis.server.request.timeout.duration"; - public static final TimeDuration - DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT = - TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS); - public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - "dfs.ratis.leader.election.minimum.timeout.duration"; - public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = - TimeDuration.valueOf(5, TimeUnit.SECONDS); - - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - "dfs.ratis.snapshot.threshold"; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; - - public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY = - "dfs.ratis.server.failure.duration"; - public static final TimeDuration - DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT = - TimeDuration.valueOf(120, TimeUnit.SECONDS); - - // TODO : this is copied from OzoneConsts, may need to move to a better place - public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; - // 16 MB by default - public static final String OZONE_SCM_CHUNK_SIZE_DEFAULT = "16MB"; - - public static final String OZONE_SCM_CLIENT_PORT_KEY = - "ozone.scm.client.port"; - public static final int OZONE_SCM_CLIENT_PORT_DEFAULT = 9860; - - public static final String OZONE_SCM_DATANODE_PORT_KEY = - "ozone.scm.datanode.port"; - public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861; - - // OZONE_OM_PORT_DEFAULT = 9862 - public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY = - "ozone.scm.block.client.port"; - public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863; - - public static final String OZONE_SCM_SECURITY_SERVICE_PORT_KEY = - "ozone.scm.security.service.port"; - public static final int OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT = 9961; - - 
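These constants are ordinary Hadoop configuration keys; a short sketch of how a component reads a few of the ones defined above, where conf is any Configuration (or OzoneConfiguration) instance:

    boolean ratisEnabled = conf.getBoolean(
        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
    int securityPort = conf.getInt(
        ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
        ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT);
    String scmDbDirs = conf.get(ScmConfigKeys.OZONE_SCM_DB_DIRS);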
// Container service client - public static final String OZONE_SCM_CLIENT_ADDRESS_KEY = - "ozone.scm.client.address"; - public static final String OZONE_SCM_CLIENT_BIND_HOST_KEY = - "ozone.scm.client.bind.host"; - public static final String OZONE_SCM_CLIENT_BIND_HOST_DEFAULT = - "0.0.0.0"; - - // Block service client - public static final String OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY = - "ozone.scm.block.client.address"; - public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY = - "ozone.scm.block.client.bind.host"; - public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT = - "0.0.0.0"; - - // SCM Security service address. - public static final String OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY = - "ozone.scm.security.service.address"; - public static final String OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY = - "ozone.scm.security.service.bind.host"; - public static final String OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT = - "0.0.0.0"; - - public static final String OZONE_SCM_DATANODE_ADDRESS_KEY = - "ozone.scm.datanode.address"; - public static final String OZONE_SCM_DATANODE_BIND_HOST_KEY = - "ozone.scm.datanode.bind.host"; - public static final String OZONE_SCM_DATANODE_BIND_HOST_DEFAULT = - "0.0.0.0"; - - public static final String OZONE_SCM_HTTP_ENABLED_KEY = - "ozone.scm.http.enabled"; - public static final String OZONE_SCM_HTTP_BIND_HOST_KEY = - "ozone.scm.http-bind-host"; - public static final String OZONE_SCM_HTTPS_BIND_HOST_KEY = - "ozone.scm.https-bind-host"; - public static final String OZONE_SCM_HTTP_ADDRESS_KEY = - "ozone.scm.http-address"; - public static final String OZONE_SCM_HTTPS_ADDRESS_KEY = - "ozone.scm.https-address"; - public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY = - "hdds.scm.kerberos.keytab.file"; - public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = - "hdds.scm.kerberos.principal"; - public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876; - public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877; - - public static final String HDDS_REST_HTTP_ADDRESS_KEY = - "hdds.rest.http-address"; - public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880"; - public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir"; - public static final String HDDS_REST_CSRF_ENABLED_KEY = - "hdds.rest.rest-csrf.enabled"; - public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false; - public static final String HDDS_REST_NETTY_HIGH_WATERMARK = - "hdds.rest.netty.high.watermark"; - public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536; - public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768; - public static final String HDDS_REST_NETTY_LOW_WATERMARK = - "hdds.rest.netty.low.watermark"; - - public static final String OZONE_SCM_HANDLER_COUNT_KEY = - "ozone.scm.handler.count.key"; - public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10; - - public static final String OZONE_SCM_SECURITY_HANDLER_COUNT_KEY = - "ozone.scm.security.handler.count.key"; - public static final int OZONE_SCM_SECURITY_HANDLER_COUNT_DEFAULT = 2; - - public static final String OZONE_SCM_DEADNODE_INTERVAL = - "ozone.scm.dead.node.interval"; - public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT = - "10m"; - - public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL = - "ozone.scm.heartbeat.thread.interval"; - public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT = - "3s"; - - public static 
final String OZONE_SCM_STALENODE_INTERVAL = - "ozone.scm.stale.node.interval"; - public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT = - "5m"; - - public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT = - "ozone.scm.heartbeat.rpc-timeout"; - public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT = - "1s"; - - /** - * Defines how frequently we will log the missing of heartbeat to a specific - * SCM. In the default case we will write a warning message for each 10 - * sequential heart beats that we miss to a specific SCM. This is to avoid - * overrunning the log with lots of HB missed Log statements. - */ - public static final String OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT = - "ozone.scm.heartbeat.log.warn.interval.count"; - public static final int OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT = - 10; - - // ozone.scm.names key is a set of DNS | DNS:PORT | IP Address | IP:PORT. - // Written as a comma separated string. e.g. scm1, scm2:8020, 7.7.7.7:7777 - // - // If this key is not specified datanodes will not be able to find - // SCM. The SCM membership can be dynamic, so this key should contain - // all possible SCM names. Once the SCM leader is discovered datanodes will - // get the right list of SCMs to heartbeat to from the leader. - // While it is good for the datanodes to know the names of all SCM nodes, - // it is sufficient to actually know the name of on working SCM. That SCM - // will be able to return the information about other SCMs that are part of - // the SCM replicated Log. - // - //In case of a membership change, any one of the SCM machines will be - // able to send back a new list to the datanodes. - public static final String OZONE_SCM_NAMES = "ozone.scm.names"; - - public static final int OZONE_SCM_DEFAULT_PORT = - OZONE_SCM_DATANODE_PORT_DEFAULT; - // The path where datanode ID is to be written to. - // if this value is not set then container startup will fail. 
- public static final String OZONE_SCM_DATANODE_ID_DIR = - "ozone.scm.datanode.id.dir"; - - public static final String OZONE_SCM_DB_CACHE_SIZE_MB = - "ozone.scm.db.cache.size.mb"; - public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128; - - public static final String OZONE_SCM_CONTAINER_SIZE = - "ozone.scm.container.size"; - public static final String OZONE_SCM_CONTAINER_SIZE_DEFAULT = "5GB"; - - public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY = - "ozone.scm.container.placement.impl"; - - public static final String OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT = - "ozone.scm.pipeline.owner.container.count"; - public static final int OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT = 3; - - public static final String - OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY = - "ozone.scm.keyvalue.container.deletion-choosing.policy"; - - public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT = - "ozone.scm.container.creation.lease.timeout"; - - public static final String - OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; - - public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT = - "ozone.scm.pipeline.destroy.timeout"; - - public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT = - "66s"; - - public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL = - "ozone.scm.pipeline.creation.interval"; - public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL_DEFAULT = - "120s"; - - public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY = - "ozone.scm.block.deletion.max.retry"; - public static final int OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT = 4096; - - public static final String HDDS_SCM_WATCHER_TIMEOUT = - "hdds.scm.watcher.timeout"; - - public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT = - "10m"; - - public static final String - HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY = - "hdds.scm.http.kerberos.principal"; - public static final String - HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY = - "hdds.scm.http.kerberos.keytab"; - - // Network topology - public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE = - "ozone.scm.network.topology.schema.file"; - public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT = - "network-topology-default.xml"; - - public static final String HDDS_TRACING_ENABLED = "hdds.tracing.enabled"; - public static final boolean HDDS_TRACING_ENABLED_DEFAULT = true; - - /** - * Never constructed. - */ - private ScmConfigKeys() { - - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java deleted file mode 100644 index 6236febb7b120..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -/** - * ScmInfo wraps the result returned from SCM#getScmInfo which - * contains clusterId and the SCM Id. - */ -public final class ScmInfo { - private String clusterId; - private String scmId; - - /** - * Builder for ScmInfo. - */ - public static class Builder { - private String clusterId; - private String scmId; - - /** - * sets the cluster id. - * @param cid clusterId to be set - * @return Builder for ScmInfo - */ - public Builder setClusterId(String cid) { - this.clusterId = cid; - return this; - } - - /** - * sets the scmId. - * @param id scmId - * @return Builder for scmInfo - */ - public Builder setScmId(String id) { - this.scmId = id; - return this; - } - - public ScmInfo build() { - return new ScmInfo(clusterId, scmId); - } - } - - private ScmInfo(String clusterId, String scmId) { - this.clusterId = clusterId; - this.scmId = scmId; - } - - /** - * Gets the clusterId from the Version file. - * @return ClusterId - */ - public String getClusterId() { - return clusterId; - } - - /** - * Gets the SCM Id from the Version file. - * @return SCM Id - */ - public String getScmId() { - return scmId; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java deleted file mode 100644 index bae0758fddb8b..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; - -/** - * This class represents the reply from XceiverClient. - */ -public class XceiverClientReply { - - private CompletableFuture response; - private Long logIndex; - - /** - * List of datanodes where the command got executed and reply is received. - * If there is an exception in the reply, these datanodes will inform - * about the servers where there is a failure. - */ - private List datanodes; - - public XceiverClientReply( - CompletableFuture response) { - this(response, null); - } - - public XceiverClientReply( - CompletableFuture response, - List datanodes) { - this.logIndex = (long) 0; - this.response = response; - this.datanodes = datanodes == null ? 
new ArrayList<>() : datanodes; - } - - public CompletableFuture getResponse() { - return response; - } - - public long getLogIndex() { - return logIndex; - } - - public void setLogIndex(Long logIndex) { - this.logIndex = logIndex; - } - - public List getDatanodes() { - return datanodes; - } - - public void addDatanode(DatanodeDetails dn) { - datanodes.add(dn); - } - - public void setResponse( - CompletableFuture response) { - this.response = response; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java deleted file mode 100644 index 5631badf44c93..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction; - -/** - * A Client for the storageContainer protocol. - */ -public abstract class XceiverClientSpi implements Closeable { - - final private AtomicInteger referenceCount; - private boolean isEvicted; - - XceiverClientSpi() { - this.referenceCount = new AtomicInteger(0); - this.isEvicted = false; - } - - void incrementReference() { - this.referenceCount.incrementAndGet(); - } - - void decrementReference() { - this.referenceCount.decrementAndGet(); - cleanup(); - } - - void setEvicted() { - isEvicted = true; - cleanup(); - } - - // close the xceiverClient only if, - // 1) there is no refcount on the client - // 2) it has been evicted from the cache. - private void cleanup() { - if (referenceCount.get() == 0 && isEvicted) { - close(); - } - } - - @VisibleForTesting - public int getRefcount() { - return referenceCount.get(); - } - - /** - * Connects to the leader in the pipeline. - */ - public abstract void connect() throws Exception; - - /** - * Connects to the leader in the pipeline using encoded token. To be used - * in a secure cluster. 
- */ - public abstract void connect(String encodedToken) throws Exception; - - @Override - public abstract void close(); - - /** - * Returns the pipeline of machines that host the container used by this - * client. - * - * @return pipeline of machines that host the container - */ - public abstract Pipeline getPipeline(); - - /** - * Sends a given command to server and gets the reply back. - * @param request Request - * @return Response to the command - * @throws IOException - */ - public ContainerCommandResponseProto sendCommand( - ContainerCommandRequestProto request) throws IOException { - try { - XceiverClientReply reply; - reply = sendCommandAsync(request); - ContainerCommandResponseProto responseProto = reply.getResponse().get(); - return responseProto; - } catch (ExecutionException | InterruptedException e) { - throw new IOException("Failed to command " + request, e); - } - } - - /** - * Sends a given command to server and gets the reply back along with - * the server associated info. - * @param request Request - * @param validators functions to validate the response - * @return Response to the command - * @throws IOException - */ - public ContainerCommandResponseProto sendCommand( - ContainerCommandRequestProto request, List validators) - throws IOException { - try { - XceiverClientReply reply; - reply = sendCommandAsync(request); - ContainerCommandResponseProto responseProto = reply.getResponse().get(); - for (CheckedBiFunction function : validators) { - function.apply(request, responseProto); - } - return responseProto; - } catch (ExecutionException | InterruptedException e) { - throw new IOException("Failed to command " + request, e); - } - } - - /** - * Sends a given command to server gets a waitable future back. - * - * @param request Request - * @return Response to the command - * @throws IOException - */ - public abstract XceiverClientReply - sendCommandAsync(ContainerCommandRequestProto request) - throws IOException, ExecutionException, InterruptedException; - - /** - * Returns pipeline Type. - * - * @return - {Stand_Alone, Ratis or Chained} - */ - public abstract HddsProtos.ReplicationType getPipelineType(); - - /** - * Check if an specfic commitIndex is replicated to majority/all servers. - * @param index index to watch for - * @param timeout timeout provided for the watch operation to complete - * @return reply containing the min commit index replicated to all or majority - * servers in case of a failure - * @throws InterruptedException - * @throws ExecutionException - * @throws TimeoutException - * @throws IOException - */ - public abstract XceiverClientReply watchForCommit(long index, long timeout) - throws InterruptedException, ExecutionException, TimeoutException, - IOException; - - /** - * returns the min commit index replicated to all servers. - * @return min commit index replicated to all servers. - */ - public abstract long getReplicatedMinCommitIndex(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java deleted file mode 100644 index 226ceda9255ad..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.client; - -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * The interface to call into underlying container layer. - * - * Written as interface to allow easy testing: implement a mock container layer - * for standalone testing of CBlock API without actually calling into remote - * containers. Actual container layer can simply re-implement this. - * - * NOTE this is temporarily needed class. When SCM containers are full-fledged, - * this interface will likely be removed. - */ -@InterfaceStability.Unstable -public interface ScmClient extends Closeable { - /** - * Creates a Container on SCM and returns the pipeline. - * @return ContainerInfo - * @throws IOException - */ - ContainerWithPipeline createContainer(String owner) throws IOException; - - /** - * Gets a container by Name -- Throws if the container does not exist. - * @param containerId - Container ID - * @return Pipeline - * @throws IOException - */ - ContainerInfo getContainer(long containerId) throws IOException; - - /** - * Gets a container by Name -- Throws if the container does not exist. - * @param containerId - Container ID - * @return ContainerWithPipeline - * @throws IOException - */ - ContainerWithPipeline getContainerWithPipeline(long containerId) - throws IOException; - - /** - * Close a container. - * - * @param containerId - ID of the container. - * @param pipeline - Pipeline where the container is located. - * @throws IOException - */ - void closeContainer(long containerId, Pipeline pipeline) throws IOException; - - /** - * Close a container. - * - * @param containerId - ID of the container. - * @throws IOException - */ - void closeContainer(long containerId) throws IOException; - - /** - * Deletes an existing container. - * @param containerId - ID of the container. - * @param pipeline - Pipeline that represents the container. - * @param force - true to forcibly delete the container. - * @throws IOException - */ - void deleteContainer(long containerId, Pipeline pipeline, boolean force) - throws IOException; - - /** - * Deletes an existing container. - * @param containerId - ID of the container. - * @param force - true to forcibly delete the container. - * @throws IOException - */ - void deleteContainer(long containerId, boolean force) throws IOException; - - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * - * @return a list of pipeline. 
- * @throws IOException - */ - List listContainer(long startContainerID, - int count) throws IOException; - - /** - * Read meta data from an existing container. - * @param containerID - ID of the container. - * @param pipeline - Pipeline where the container is located. - * @return ContainerInfo - * @throws IOException - */ - ContainerDataProto readContainer(long containerID, Pipeline pipeline) - throws IOException; - - /** - * Read meta data from an existing container. - * @param containerID - ID of the container. - * @return ContainerInfo - * @throws IOException - */ - ContainerDataProto readContainer(long containerID) - throws IOException; - - /** - * Gets the container size -- Computed by SCM from Container Reports. - * @param containerID - ID of the container. - * @return number of bytes used by this container. - * @throws IOException - */ - long getContainerSize(long containerID) throws IOException; - - /** - * Creates a Container on SCM and returns the pipeline. - * @param type - Replication Type. - * @param replicationFactor - Replication Factor - * @return ContainerInfo - * @throws IOException - in case of error. - */ - ContainerWithPipeline createContainer(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor replicationFactor, - String owner) throws IOException; - - /** - * Returns a set of Nodes that meet a query criteria. - * @param nodeStatuses - Criteria that we want the node to have. - * @param queryScope - Query scope - Cluster or pool. - * @param poolName - if it is pool, a pool name is required. - * @return A set of nodes that meet the requested criteria. - * @throws IOException - */ - List queryNode(HddsProtos.NodeState nodeStatuses, - HddsProtos.QueryScope queryScope, String poolName) throws IOException; - - /** - * Creates a specified replication pipeline. - * @param type - Type - * @param factor - Replication factor - * @param nodePool - Set of machines. - * @throws IOException - */ - Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) - throws IOException; - - /** - * Returns the list of active Pipelines. - * - * @return list of Pipeline - * @throws IOException in case of any exception - */ - List listPipelines() throws IOException; - - /** - * Activates the pipeline given a pipeline ID. - * - * @param pipelineID PipelineID to activate. - * @throws IOException In case of exception while activating the pipeline - */ - void activatePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Deactivates the pipeline given a pipeline ID. - * - * @param pipelineID PipelineID to deactivate. - * @throws IOException In case of exception while deactivating the pipeline - */ - void deactivatePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Closes the pipeline given a pipeline ID. - * - * @param pipelineID PipelineID to close. - * @throws IOException In case of exception while closing the pipeline - */ - void closePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Check if SCM is in safe mode. - * - * @return Returns true if SCM is in safe mode else returns false. - * @throws IOException - */ - boolean inSafeMode() throws IOException; - - /** - * Force SCM out of safe mode. - * - * @return returns true if operation is successful. - * @throws IOException - */ - boolean forceExitSafeMode() throws IOException; - - /** - * Start ReplicationManager. 
- */ - void startReplicationManager() throws IOException; - - /** - * Stop ReplicationManager. - */ - void stopReplicationManager() throws IOException; - - /** - * Returns ReplicationManager status. - * - * @return True if ReplicationManager is running, false otherwise. - */ - boolean getReplicationManagerStatus() throws IOException; - - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java deleted file mode 100644 index e2f7033d7fa61..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.client; - -/** - * This package contains classes for the client of the storage container - * protocol. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java deleted file mode 100644 index 9d37dfb1f3350..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import java.io.IOException; - -/** - * Signals that ContainerException of some sort has occurred. This is parent - * of all the exceptions thrown by ContainerManager. - */ -public class ContainerException extends IOException { - - /** - * Constructs an {@code ContainerException} with {@code null} - * as its error detail message. - */ - public ContainerException() { - super(); - } - - /** - * Constructs an {@code ContainerException} with the specified detail message. 
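The ScmClient interface removed above is a thin admin facade over SCM. A hedged usage sketch built only from the methods listed in that interface; the owner string is arbitrary and the RATIS/THREE enum constants are assumed from HddsProtos:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

    public class ScmClientSketch {
      // Allocate a container, close it, then query its size. ScmClient is Closeable,
      // so try-with-resources releases the underlying connection.
      static void roundTrip(ScmClient scm) throws Exception {
        try (ScmClient client = scm) {
          ContainerWithPipeline created = client.createContainer(
              HddsProtos.ReplicationType.RATIS,
              HddsProtos.ReplicationFactor.THREE,
              "sketch-owner");
          long id = created.getContainerInfo().getContainerID();
          client.closeContainer(id);
          System.out.println(client.getContainerSize(id));
        }
      }
    }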
- * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public ContainerException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java deleted file mode 100644 index bb44da4e78e58..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.container; - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.commons.lang3.builder.CompareToBuilder; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - -/** - * Container ID is an integer that is a value between 1..MAX_CONTAINER ID. - *

- * We are creating a specific type for this to avoid mixing this with - * normal integers in code. - */ -public final class ContainerID implements Comparable { - - private final long id; - - // TODO: make this private. - /** - * Constructs ContainerID. - * - * @param id int - */ - public ContainerID(long id) { - this.id = id; - } - - /** - * Factory method for creation of ContainerID. - * @param containerID long - * @return ContainerID. - */ - public static ContainerID valueof(final long containerID) { - Preconditions.checkState(containerID > 0, - "Container ID should be a positive long. "+ containerID); - return new ContainerID(containerID); - } - - /** - * Returns int representation of ID. - * - * @return int - */ - public long getId() { - return id; - } - - public byte[] getBytes() { - return Longs.toByteArray(id); - } - - @Override - public boolean equals(final Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - final ContainerID that = (ContainerID) o; - - return new EqualsBuilder() - .append(getId(), that.getId()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(61, 71) - .append(getId()) - .toHashCode(); - } - - @Override - public int compareTo(final ContainerID that) { - Preconditions.checkNotNull(that); - return new CompareToBuilder() - .append(this.getId(), that.getId()) - .build(); - } - - @Override - public String toString() { - return "#" + id; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java deleted file mode 100644 index 5c58e92d3c5d9..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ /dev/null @@ -1,471 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
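ContainerID above is a typed wrapper over a positive long. Every call in the following sketch appears in the removed class itself; only the surrounding harness class is invented:

    import org.apache.hadoop.hdds.scm.container.ContainerID;

    public class ContainerIdSketch {
      public static void main(String[] args) {
        ContainerID first = ContainerID.valueof(1L);   // valueof rejects ids <= 0
        ContainerID second = ContainerID.valueof(2L);

        System.out.println(first.compareTo(second) < 0);           // true: ordered by the long value
        System.out.println(first.equals(ContainerID.valueof(1L))); // true: value equality
        System.out.println(first);                                 // "#1"
        System.out.println(first.getBytes().length);               // 8 (Longs.toByteArray)
      }
    }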
- */ -package org.apache.hadoop.hdds.scm.container; - -import static java.lang.Math.max; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.google.common.base.Preconditions; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import java.util.Arrays; -import java.util.Comparator; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.util.Time; - -/** - * Class wraps ozone container info. - */ -public class ContainerInfo implements Comparator, - Comparable, Externalizable { - - private static final ObjectWriter WRITER; - private static final String SERIALIZATION_ERROR_MSG = "Java serialization not" - + " supported. Use protobuf instead."; - - static { - ObjectMapper mapper = new ObjectMapper(); - mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY); - mapper - .setVisibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - private HddsProtos.LifeCycleState state; - @JsonIgnore - private PipelineID pipelineID; - private ReplicationFactor replicationFactor; - private ReplicationType replicationType; - private long usedBytes; - private long numberOfKeys; - private long lastUsed; - // The wall-clock ms since the epoch at which the current state enters. - private long stateEnterTime; - private String owner; - private long containerID; - private long deleteTransactionId; - // The sequenceId of a close container cannot change, and all the - // container replica should have the same sequenceId. - private long sequenceId; - - /** - * Allows you to maintain private data on ContainerInfo. This is not - * serialized via protobuf, just allows us to maintain some private data. - */ - @JsonIgnore - private byte[] data; - - @SuppressWarnings("parameternumber") - ContainerInfo( - long containerID, - HddsProtos.LifeCycleState state, - PipelineID pipelineID, - long usedBytes, - long numberOfKeys, - long stateEnterTime, - String owner, - long deleteTransactionId, - long sequenceId, - ReplicationFactor replicationFactor, - ReplicationType repType) { - this.containerID = containerID; - this.pipelineID = pipelineID; - this.usedBytes = usedBytes; - this.numberOfKeys = numberOfKeys; - this.lastUsed = Time.monotonicNow(); - this.state = state; - this.stateEnterTime = stateEnterTime; - this.owner = owner; - this.deleteTransactionId = deleteTransactionId; - this.sequenceId = sequenceId; - this.replicationFactor = replicationFactor; - this.replicationType = repType; - } - - /** - * Needed for serialization findbugs. 
- */ - public ContainerInfo() { - } - - public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) { - ContainerInfo.Builder builder = new ContainerInfo.Builder(); - return builder.setPipelineID( - PipelineID.getFromProtobuf(info.getPipelineID())) - .setUsedBytes(info.getUsedBytes()) - .setNumberOfKeys(info.getNumberOfKeys()) - .setState(info.getState()) - .setStateEnterTime(info.getStateEnterTime()) - .setOwner(info.getOwner()) - .setContainerID(info.getContainerID()) - .setDeleteTransactionId(info.getDeleteTransactionId()) - .setReplicationFactor(info.getReplicationFactor()) - .setReplicationType(info.getReplicationType()) - .build(); - } - - public long getContainerID() { - return containerID; - } - - public HddsProtos.LifeCycleState getState() { - return state; - } - - public void setState(HddsProtos.LifeCycleState state) { - this.state = state; - } - - public long getStateEnterTime() { - return stateEnterTime; - } - - public ReplicationFactor getReplicationFactor() { - return replicationFactor; - } - - public PipelineID getPipelineID() { - return pipelineID; - } - - public long getUsedBytes() { - return usedBytes; - } - - public void setUsedBytes(long value) { - usedBytes = value; - } - - public long getNumberOfKeys() { - return numberOfKeys; - } - - public void setNumberOfKeys(long value) { - numberOfKeys = value; - } - - public long getDeleteTransactionId() { - return deleteTransactionId; - } - - public long getSequenceId() { - return sequenceId; - } - - public void updateDeleteTransactionId(long transactionId) { - deleteTransactionId = max(transactionId, deleteTransactionId); - } - - public void updateSequenceId(long sequenceID) { - assert (isOpen() || state == HddsProtos.LifeCycleState.QUASI_CLOSED); - sequenceId = max(sequenceID, sequenceId); - } - - public ContainerID containerID() { - return new ContainerID(getContainerID()); - } - - /** - * Gets the last used time from SCM's perspective. - * - * @return time in milliseconds. - */ - public long getLastUsed() { - return lastUsed; - } - - public ReplicationType getReplicationType() { - return replicationType; - } - - public void updateLastUsedTime() { - lastUsed = Time.monotonicNow(); - } - - public HddsProtos.ContainerInfoProto getProtobuf() { - HddsProtos.ContainerInfoProto.Builder builder = - HddsProtos.ContainerInfoProto.newBuilder(); - Preconditions.checkState(containerID > 0); - return builder.setContainerID(getContainerID()) - .setUsedBytes(getUsedBytes()) - .setNumberOfKeys(getNumberOfKeys()).setState(getState()) - .setStateEnterTime(getStateEnterTime()).setContainerID(getContainerID()) - .setDeleteTransactionId(getDeleteTransactionId()) - .setPipelineID(getPipelineID().getProtobuf()) - .setReplicationFactor(getReplicationFactor()) - .setReplicationType(getReplicationType()) - .setOwner(getOwner()) - .build(); - } - - public String getOwner() { - return owner; - } - - public void setOwner(String owner) { - this.owner = owner; - } - - @Override - public String toString() { - return "ContainerInfo{" - + "id=" + containerID - + ", state=" + state - + ", pipelineID=" + pipelineID - + ", stateEnterTime=" + stateEnterTime - + ", owner=" + owner - + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - ContainerInfo that = (ContainerInfo) o; - - return new EqualsBuilder() - .append(getContainerID(), that.getContainerID()) - - // TODO : Fix this later. If we add these factors some tests fail. 
- // So Commenting this to continue and will enforce this with - // Changes in pipeline where we remove Container Name to - // SCMContainerinfo from Pipeline. - // .append(pipeline.getFactor(), that.pipeline.getFactor()) - // .append(pipeline.getType(), that.pipeline.getType()) - .append(owner, that.owner) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(11, 811) - .append(getContainerID()) - .append(getOwner()) - .toHashCode(); - } - - /** - * Compares its two arguments for order. Returns a negative integer, zero, or - * a positive integer as the first argument is less than, equal to, or greater - * than the second.

- * - * @param o1 the first object to be compared. - * @param o2 the second object to be compared. - * @return a negative integer, zero, or a positive integer as the first - * argument is less than, equal to, or greater than the second. - * @throws NullPointerException if an argument is null and this comparator - * does not permit null arguments - * @throws ClassCastException if the arguments' types prevent them from - * being compared by this comparator. - */ - @Override - public int compare(ContainerInfo o1, ContainerInfo o2) { - return Long.compare(o1.getLastUsed(), o2.getLastUsed()); - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less than, - * equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(ContainerInfo o) { - return this.compare(this, o); - } - - /** - * Returns a JSON string of this object. - * - * @return String - json string - * @throws IOException - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Returns private data that is set on this containerInfo. - * - * @return blob, the user can interpret it any way they like. - */ - public byte[] getData() { - if (this.data != null) { - return Arrays.copyOf(this.data, this.data.length); - } else { - return null; - } - } - - /** - * Set private data on ContainerInfo object. - * - * @param data -- private data. - */ - public void setData(byte[] data) { - if (data != null) { - this.data = Arrays.copyOf(data, data.length); - } - } - - /** - * Throws IOException as default java serialization is not supported. Use - * serialization via protobuf instead. - * - * @param out the stream to write the object to - * @throws IOException Includes any I/O exceptions that may occur - * @serialData Overriding methods should use this tag to describe - * the data layout of this Externalizable object. - * List the sequence of element types and, if possible, - * relate the element to a public/protected field and/or - * method of this Externalizable class. - */ - @Override - public void writeExternal(ObjectOutput out) throws IOException { - throw new IOException(SERIALIZATION_ERROR_MSG); - } - - /** - * Throws IOException as default java serialization is not supported. Use - * serialization via protobuf instead. - * - * @param in the stream to read data from in order to restore the object - * @throws IOException if I/O errors occur - * @throws ClassNotFoundException If the class for an object being - * restored cannot be found. - */ - @Override - public void readExternal(ObjectInput in) - throws IOException, ClassNotFoundException { - throw new IOException(SERIALIZATION_ERROR_MSG); - } - - /** - * Builder class for ContainerInfo. 
- */ - public static class Builder { - private HddsProtos.LifeCycleState state; - private long used; - private long keys; - private long stateEnterTime; - private String owner; - private long containerID; - private long deleteTransactionId; - private long sequenceId; - private PipelineID pipelineID; - private ReplicationFactor replicationFactor; - private ReplicationType replicationType; - - public Builder setReplicationType( - ReplicationType repType) { - this.replicationType = repType; - return this; - } - - public Builder setPipelineID(PipelineID pipelineId) { - this.pipelineID = pipelineId; - return this; - } - - public Builder setReplicationFactor(ReplicationFactor repFactor) { - this.replicationFactor = repFactor; - return this; - } - - public Builder setContainerID(long id) { - Preconditions.checkState(id >= 0); - this.containerID = id; - return this; - } - - public Builder setState(HddsProtos.LifeCycleState lifeCycleState) { - this.state = lifeCycleState; - return this; - } - - public Builder setUsedBytes(long bytesUsed) { - this.used = bytesUsed; - return this; - } - - public Builder setNumberOfKeys(long keyCount) { - this.keys = keyCount; - return this; - } - - public Builder setStateEnterTime(long time) { - this.stateEnterTime = time; - return this; - } - - public Builder setOwner(String containerOwner) { - this.owner = containerOwner; - return this; - } - - public Builder setDeleteTransactionId(long deleteTransactionID) { - this.deleteTransactionId = deleteTransactionID; - return this; - } - - public Builder setSequenceId(long sequenceID) { - this.sequenceId = sequenceID; - return this; - } - - public ContainerInfo build() { - return new ContainerInfo(containerID, state, pipelineID, - used, keys, stateEnterTime, owner, deleteTransactionId, - sequenceId, replicationFactor, replicationType); - } - } - - /** - * Check if a container is in open state, this will check if the - * container is either open or closing state. Any containers in these states - * is managed as an open container by SCM. - */ - public boolean isOpen() { - return state == HddsProtos.LifeCycleState.OPEN - || state == HddsProtos.LifeCycleState.CLOSING; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java deleted file mode 100644 index 3eebcce8403ce..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -/** - * Signals that a container is missing from ContainerManager. 
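ContainerInfo instances are assembled through the fluent Builder shown above. A sketch of a minimal construction; PipelineID.randomId() is an assumed factory not shown in this hunk, and the RATIS/THREE constants are assumed from HddsProtos:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

    public class ContainerInfoSketch {
      public static void main(String[] args) {
        ContainerInfo info = new ContainerInfo.Builder()
            .setContainerID(42L)                                    // must be >= 0
            .setState(HddsProtos.LifeCycleState.OPEN)
            .setPipelineID(PipelineID.randomId())                   // assumed factory, not in this hunk
            .setReplicationType(HddsProtos.ReplicationType.RATIS)
            .setReplicationFactor(HddsProtos.ReplicationFactor.THREE)
            .setOwner("sketch-owner")
            .build();

        System.out.println(info.isOpen());      // true: OPEN and CLOSING both count as open
        System.out.println(info.getProtobuf()); // wire form; requires a positive container id
      }
    }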
- */ -public class ContainerNotFoundException extends ContainerException { - - /** - * Constructs an {@code ContainerNotFoundException} with {@code null} - * as its error detail message. - */ - public ContainerNotFoundException() { - super(); - } - - /** - * Constructs an {@code ContainerNotFoundException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public ContainerNotFoundException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java deleted file mode 100644 index fdbc18b1191e8..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -/** - * Signals that a ContainerReplica is missing from the Container in - * ContainerManager. - */ -public class ContainerReplicaNotFoundException extends ContainerException { - - /** - * Constructs an {@code ContainerReplicaNotFoundException} with {@code null} - * as its error detail message. - */ - public ContainerReplicaNotFoundException() { - super(); - } - - /** - * Constructs an {@code ContainerReplicaNotFoundException} with the - * specified detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public ContainerReplicaNotFoundException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java deleted file mode 100644 index 7ac0401af1174..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; - -/** - * Allocated block wraps the result returned from SCM#allocateBlock which - * contains a Pipeline and the key. - */ -public final class AllocatedBlock { - private Pipeline pipeline; - private ContainerBlockID containerBlockID; - - /** - * Builder for AllocatedBlock. - */ - public static class Builder { - private Pipeline pipeline; - private ContainerBlockID containerBlockID; - - public Builder setPipeline(Pipeline p) { - this.pipeline = p; - return this; - } - - public Builder setContainerBlockID(ContainerBlockID blockId) { - this.containerBlockID = blockId; - return this; - } - - public AllocatedBlock build() { - return new AllocatedBlock(pipeline, containerBlockID); - } - } - - private AllocatedBlock(Pipeline pipeline, ContainerBlockID containerBlockID) { - this.pipeline = pipeline; - this.containerBlockID = containerBlockID; - } - - public Pipeline getPipeline() { - return pipeline; - } - - public ContainerBlockID getBlockID() { - return containerBlockID; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java deleted file mode 100644 index 86f5a66cf4ca3..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -/** - * Exceptions thrown when a block is yet to be committed on the datanode. - */ -public class BlockNotCommittedException extends StorageContainerException { - - /** - * Constructs an {@code IOException} with the specified detail message. 
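AllocatedBlock above is another small builder-based holder. A sketch of wrapping an allocation result; the two-argument ContainerBlockID constructor is assumed from hdds-client and is not part of this hunk:

    import org.apache.hadoop.hdds.client.ContainerBlockID;
    import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

    public class AllocatedBlockSketch {
      // Pairs the block id handed out by SCM with the pipeline it should be written to.
      static AllocatedBlock wrap(Pipeline pipeline, long containerId, long localId) {
        return new AllocatedBlock.Builder()
            .setPipeline(pipeline)
            .setContainerBlockID(new ContainerBlockID(containerId, localId)) // assumed ctor
            .build();
      }
    }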
- * - * @param message The detail message (which is saved for later retrieval by - * the {@link #getMessage()} method) - */ - public BlockNotCommittedException(String message) { - super(message, ContainerProtos.Result.BLOCK_NOT_COMMITTED); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java deleted file mode 100644 index 4e406e6e97f45..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -/** - * Exceptions thrown when a write/update opearation is done on non-open - * container. - */ -public class ContainerNotOpenException extends StorageContainerException { - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the {@link #getMessage()} method) - */ - public ContainerNotOpenException(String message) { - super(message, ContainerProtos.Result.CONTAINER_NOT_OPEN); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java deleted file mode 100644 index 5b01bd2c652bc..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import java.util.Comparator; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException; - -/** - * Class wraps ozone container info. - */ -public class ContainerWithPipeline implements Comparator, - Comparable { - - private final ContainerInfo containerInfo; - private final Pipeline pipeline; - - public ContainerWithPipeline(ContainerInfo containerInfo, Pipeline pipeline) { - this.containerInfo = containerInfo; - this.pipeline = pipeline; - } - - public ContainerInfo getContainerInfo() { - return containerInfo; - } - - public Pipeline getPipeline() { - return pipeline; - } - - public static ContainerWithPipeline fromProtobuf( - HddsProtos.ContainerWithPipeline allocatedContainer) - throws UnknownPipelineStateException { - return new ContainerWithPipeline( - ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()), - Pipeline.getFromProtobuf(allocatedContainer.getPipeline())); - } - - public HddsProtos.ContainerWithPipeline getProtobuf() - throws UnknownPipelineStateException { - HddsProtos.ContainerWithPipeline.Builder builder = - HddsProtos.ContainerWithPipeline.newBuilder(); - builder.setContainerInfo(getContainerInfo().getProtobuf()) - .setPipeline(getPipeline().getProtobufMessage()); - - return builder.build(); - } - - - @Override - public String toString() { - return containerInfo.toString() + " | " + pipeline.toString(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - ContainerWithPipeline that = (ContainerWithPipeline) o; - - return new EqualsBuilder() - .append(getContainerInfo(), that.getContainerInfo()) - .append(getPipeline(), that.getPipeline()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(11, 811) - .append(getContainerInfo()) - .append(getPipeline()) - .toHashCode(); - } - - /** - * Compares its two arguments for order. Returns a negative integer, zero, or - * a positive integer as the first argument is less than, equal to, or greater - * than the second.

- * - * @param o1 the first object to be compared. - * @param o2 the second object to be compared. - * @return a negative integer, zero, or a positive integer as the first - * argument is less than, equal to, or greater than the second. - * @throws NullPointerException if an argument is null and this comparator - * does not permit null arguments - * @throws ClassCastException if the arguments' types prevent them from - * being compared by this comparator. - */ - @Override - public int compare(ContainerWithPipeline o1, ContainerWithPipeline o2) { - return o1.getContainerInfo().compareTo(o2.getContainerInfo()); - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less than, - * equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(ContainerWithPipeline o) { - return this.compare(this, o); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java deleted file mode 100644 index 5f5aaceb16a21..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import org.apache.hadoop.hdds.client.BlockID; - -import static org.apache.hadoop.hdds.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmBlockResult; - -/** - * Class wraps storage container manager block deletion results. - */ -public class DeleteBlockResult { - private BlockID blockID; - private DeleteScmBlockResult.Result result; - - public DeleteBlockResult(final BlockID blockID, - final DeleteScmBlockResult.Result result) { - this.blockID = blockID; - this.result = result; - } - - /** - * Get block id deleted. - * @return block id. - */ - public BlockID getBlockID() { - return blockID; - } - - /** - * Get key deletion result. - * @return key deletion result. 
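ContainerWithPipeline above couples the two objects the SCM client protocol returns together; ordering delegates to the wrapped ContainerInfo while equality covers both fields. A brief round-trip sketch using only methods shown in the removed class:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
    import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;

    public class ContainerWithPipelineSketch {
      // Pair container metadata with its current pipeline and round-trip the protobuf form.
      static ContainerWithPipeline roundTrip(ContainerInfo info, Pipeline pipeline)
          throws UnknownPipelineStateException {
        ContainerWithPipeline cwp = new ContainerWithPipeline(info, pipeline);
        HddsProtos.ContainerWithPipeline proto = cwp.getProtobuf();
        return ContainerWithPipeline.fromProtobuf(proto);
      }
    }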
- */ - public DeleteScmBlockResult.Result getResult() { - return result; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java deleted file mode 100644 index eb215d63a4694..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.common.helpers; - - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; - -import java.util.ArrayList; -import java.util.List; -import java.util.Collection; - -/** - * This class contains set of dns and containers which ozone client provides - * to be handed over to SCM when block allocation request comes. 
- */ -public class ExcludeList { - - private final List datanodes; - private final List containerIds; - private final List pipelineIds; - - - public ExcludeList() { - datanodes = new ArrayList<>(); - containerIds = new ArrayList<>(); - pipelineIds = new ArrayList<>(); - } - - public List getContainerIds() { - return containerIds; - } - - public List getDatanodes() { - return datanodes; - } - - public void addDatanodes(Collection dns) { - datanodes.addAll(dns); - } - - public void addDatanode(DatanodeDetails dn) { - datanodes.add(dn); - } - - public void addConatinerId(ContainerID containerId) { - containerIds.add(containerId); - } - - public void addPipeline(PipelineID pipelineId) { - pipelineIds.add(pipelineId); - } - - public List getPipelineIds() { - return pipelineIds; - } - - public HddsProtos.ExcludeListProto getProtoBuf() { - HddsProtos.ExcludeListProto.Builder builder = - HddsProtos.ExcludeListProto.newBuilder(); - containerIds - .forEach(id -> builder.addContainerIds(id.getId())); - datanodes.forEach(dn -> { - builder.addDatanodes(dn.getUuidString()); - }); - pipelineIds.forEach(pipelineID -> { - builder.addPipelineIds(pipelineID.getProtobuf()); - }); - return builder.build(); - } - - public static ExcludeList getFromProtoBuf( - HddsProtos.ExcludeListProto excludeListProto) { - ExcludeList excludeList = new ExcludeList(); - excludeListProto.getContainerIdsList().forEach(id -> { - excludeList.addConatinerId(ContainerID.valueof(id)); - }); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - excludeListProto.getDatanodesList().forEach(dn -> { - builder.setUuid(dn); - excludeList.addDatanode(builder.build()); - }); - excludeListProto.getPipelineIdsList().forEach(pipelineID -> { - excludeList.addPipeline(PipelineID.getFromProtobuf(pipelineID)); - }); - return excludeList; - } - - public void clear() { - datanodes.clear(); - containerIds.clear(); - pipelineIds.clear(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java deleted file mode 100644 index 1378d1ab70ad0..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -/** - * Exceptions thrown when a container is in invalid state while doing a I/O. 
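The ExcludeList above is how the Ozone client tells SCM which datanodes, containers and pipelines to avoid on the next block allocation. A sketch of building one and round-tripping it through protobuf; PipelineID.randomId() is an assumed factory, and addConatinerId is the removed class's actual method name:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.ContainerID;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

    public class ExcludeListSketch {
      public static void main(String[] args) {
        ExcludeList exclude = new ExcludeList();
        exclude.addConatinerId(ContainerID.valueof(7L)); // [sic] spelling from the removed source
        exclude.addPipeline(PipelineID.randomId());      // assumed factory, not in this hunk

        HddsProtos.ExcludeListProto proto = exclude.getProtoBuf();
        ExcludeList back = ExcludeList.getFromProtoBuf(proto);
        System.out.println(back.getContainerIds());
        exclude.clear(); // reusable between allocateBlock calls
      }
    }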
- */ -public class InvalidContainerStateException extends StorageContainerException { - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the {@link #getMessage()} method) - */ - public InvalidContainerStateException(String message) { - super(message, ContainerProtos.Result.INVALID_CONTAINER_STATE); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java deleted file mode 100644 index f1405fff94617..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -import java.io.IOException; - -/** - * Exceptions thrown from the Storage Container. - */ -public class StorageContainerException extends IOException { - private ContainerProtos.Result result; - - /** - * Constructs an {@code IOException} with {@code null} - * as its error detail message. - */ - public StorageContainerException(ContainerProtos.Result result) { - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the {@link #getMessage()} method) - * @param result - The result code - */ - public StorageContainerException(String message, - ContainerProtos.Result result) { - super(message); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message - * and cause. - *

- *

Note that the detail message associated with {@code cause} is - * not automatically incorporated into this exception's detail - * message. - * - * @param message The detail message (which is saved for later retrieval by - * the {@link #getMessage()} method) - * - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * - * @param result - The result code - * @since 1.6 - */ - public StorageContainerException(String message, Throwable cause, - ContainerProtos.Result result) { - super(message, cause); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified cause and a - * detail message of {@code (cause==null ? null : cause.toString())} - * (which typically contains the class and detail message of {@code cause}). - * This constructor is useful for IO exceptions that are little more - * than wrappers for other throwables. - * - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @param result - The result code - * @since 1.6 - */ - public StorageContainerException(Throwable cause, ContainerProtos.Result - result) { - super(cause); - this.result = result; - } - - /** - * Returns Result. - * - * @return Result. - */ - public ContainerProtos.Result getResult() { - return result; - } - - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java deleted file mode 100644 index ffe0d3d4d99ab..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; -/** - Contains protocol buffer helper classes and utilites used in - impl. - **/ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java deleted file mode 100644 index d13dcb1f6c40f..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
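StorageContainerException above carries a ContainerProtos.Result code, and the small subclasses earlier in this hunk (BlockNotCommittedException, ContainerNotOpenException, InvalidContainerStateException) simply pin that code. A sketch of how a caller can branch on the code rather than the subtype; the handler class and message are invented:

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;

    public class ResultCodeSketch {
      static void handle(IOException e) {
        if (e instanceof StorageContainerException) {
          ContainerProtos.Result result = ((StorageContainerException) e).getResult();
          if (result == ContainerProtos.Result.CONTAINER_NOT_OPEN) {
            // e.g. refresh the pipeline and retry against an open container
          }
        }
      }

      static void fail() throws IOException {
        throw new ContainerNotOpenException("container 42 is not open"); // result = CONTAINER_NOT_OPEN
      }
    }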
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java deleted file mode 100644 index 52ce7964b6769..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.io.IOException; -import java.util.List; - -/** - * A ContainerPlacementPolicy support choosing datanodes to build replication - * pipeline with specified constraints. - */ -public interface ContainerPlacementPolicy { - - /** - * Given the replication factor and size required, return set of datanodes - * that satisfy the nodes and size requirement. - * - * @param excludedNodes - list of nodes to be excluded. - * @param favoredNodes - list of nodes preferred. - * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. - * @return list of datanodes chosen. - * @throws IOException - */ - List chooseDatanodes(List excludedNodes, - List favoredNodes, int nodesRequired, long sizeRequired) - throws IOException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java deleted file mode 100644 index dac4752fe66fa..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; -/** - Contains container placement policy interface definition. - **/ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java deleted file mode 100644 index db1f82ae411d0..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.exceptions; - -import java.io.IOException; - -/** - * Exception thrown by SCM. - */ -public class SCMException extends IOException { - private final ResultCodes result; - - /** - * Constructs an {@code IOException} with {@code null} - * as its error detail message. - */ - public SCMException(ResultCodes result) { - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the - * {@link #getMessage()} method) - */ - public SCMException(String message, ResultCodes result) { - super(message); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message - * and cause. - *
- *
Note that the detail message associated with {@code cause} is - * not automatically incorporated into this exception's detail - * message. - * - * @param message The detail message (which is saved for later retrieval by - * the - * {@link #getMessage()} method) - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @since 1.6 - */ - public SCMException(String message, Throwable cause, ResultCodes result) { - super(message, cause); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified cause and a - * detail message of {@code (cause==null ? null : cause.toString())} - * (which typically contains the class and detail message of {@code cause}). - * This constructor is useful for IO exceptions that are little more - * than wrappers for other throwables. - * - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @since 1.6 - */ - public SCMException(Throwable cause, ResultCodes result) { - super(cause); - this.result = result; - } - - /** - * Returns resultCode. - * @return ResultCode - */ - public ResultCodes getResult() { - return result; - } - - /** - * Error codes to make it easy to decode these exceptions. - */ - public enum ResultCodes { - OK, - FAILED_TO_LOAD_NODEPOOL, - FAILED_TO_FIND_NODE_IN_POOL, - FAILED_TO_FIND_HEALTHY_NODES, - FAILED_TO_FIND_NODES_WITH_SPACE, - FAILED_TO_FIND_SUITABLE_NODE, - INVALID_CAPACITY, - INVALID_BLOCK_SIZE, - SAFE_MODE_EXCEPTION, - FAILED_TO_LOAD_OPEN_CONTAINER, - FAILED_TO_ALLOCATE_CONTAINER, - FAILED_TO_CHANGE_CONTAINER_STATE, - FAILED_TO_CHANGE_PIPELINE_STATE, - CONTAINER_EXISTS, - FAILED_TO_FIND_CONTAINER, - FAILED_TO_FIND_CONTAINER_WITH_SPACE, - BLOCK_EXISTS, - FAILED_TO_FIND_BLOCK, - IO_EXCEPTION, - UNEXPECTED_CONTAINER_STATE, - SCM_NOT_INITIALIZED, - DUPLICATE_DATANODE, - NO_SUCH_DATANODE, - NO_REPLICA_FOUND, - FAILED_TO_FIND_ACTIVE_PIPELINE, - FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY, - FAILED_TO_ALLOCATE_ENOUGH_BLOCKS, - INTERNAL_ERROR - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java deleted file mode 100644 index 721a32b48e219..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.exceptions; -/** - Exception objects for the SCM Server. 
- */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java deleted file mode 100644 index 6cf73bf54800d..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import java.util.Collection; -import java.util.List; - -/** - * The interface defines an inner node in a network topology. - * An inner node represents network topology entities, such as data center, - * rack, switch or logical group. - */ -public interface InnerNode extends Node { - /** A factory interface to get new InnerNode instance. */ - interface Factory { - /** Construct an InnerNode from name, location, parent, level and cost. */ - N newInnerNode(String name, String location, InnerNode parent, int level, - int cost); - } - - /** - * Add node n to the subtree of this node. - * @param n node to be added - * @return true if the node is added; false otherwise - */ - boolean add(Node n); - - /** - * Remove node n from the subtree of this node. - * @param n node to be deleted - */ - void remove(Node n); - - /** - * Given a node's string representation, return a reference to the node. - * @param loc string location of the format /dc/rack/nodegroup/node - * @return null if the node is not found - */ - Node getNode(String loc); - - /** - * @return number of its all nodes at level level. Here level is a - * relative level. If level is 1, means node itself. If level is 2, means its - * direct children, and so on. - **/ - int getNumOfNodes(int level); - - /** - * Get leafIndex leaf of this subtree. - * - * @param leafIndex an indexed leaf of the node - * @return the leaf node corresponding to the given index. - */ - Node getLeaf(int leafIndex); - - /** - * Get leafIndex leaf of this subtree. - * - * @param leafIndex ode's index, start from 0, skip the nodes in - * excludedScope and excludedNodes with ancestorGen - * @param excludedScopes the excluded scopes - * @param excludedNodes nodes to be excluded. 
If ancestorGen is not 0, - * the chosen node will not share same ancestor with - * those in excluded nodes at the specified generation - * @param ancestorGen ignored with value is 0 - * @return the leaf node corresponding to the given index - */ - Node getLeaf(int leafIndex, List excludedScopes, - Collection excludedNodes, int ancestorGen); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java deleted file mode 100644 index f2183fc9823fe..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -import com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; -import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR; - -/** - * A thread safe class that implements InnerNode interface. - */ -public class InnerNodeImpl extends NodeImpl implements InnerNode { - protected static class Factory implements InnerNode.Factory { - protected Factory() {} - - public InnerNodeImpl newInnerNode(String name, String location, - InnerNode parent, int level, int cost) { - return new InnerNodeImpl(name, location, parent, level, cost); - } - } - - static final Factory FACTORY = new Factory(); - // a map of node's network name to Node for quick search and keep - // the insert order - private final HashMap childrenMap = - new LinkedHashMap(); - // number of descendant leaves under this node - private int numOfLeaves; - // LOGGER - public static final Logger LOG = LoggerFactory.getLogger(InnerNodeImpl.class); - - /** - * Construct an InnerNode from its name, network location, parent, level and - * its cost. - **/ - protected InnerNodeImpl(String name, String location, InnerNode parent, - int level, int cost) { - super(name, location, parent, level, cost); - } - - /** @return the number of children this node has */ - private int getNumOfChildren() { - return childrenMap.size(); - } - - /** @return its leaf nodes number */ - @Override - public int getNumOfLeaves() { - return numOfLeaves; - } - - /** - * @return number of its all nodes at level level. Here level is a - * relative level. If level is 1, means node itself. If level is 2, means its - * direct children, and so on. 
- **/ - public int getNumOfNodes(int level) { - Preconditions.checkArgument(level > 0); - int count = 0; - if (level == 1) { - count += 1; - } else if (level == 2) { - count += getNumOfChildren(); - } else { - for (Node node: childrenMap.values()) { - if (node instanceof InnerNode) { - count += ((InnerNode)node).getNumOfNodes(level -1); - } else { - throw new RuntimeException("Cannot support Level:" + level + - " on this node " + this.toString()); - } - } - } - return count; - } - - /** - * Judge if this node is the parent of a leave node n. - * @return true if this node is the parent of n - */ - private boolean isLeafParent() { - if (childrenMap.isEmpty()) { - return true; - } - Node child = childrenMap.values().iterator().next(); - return child instanceof InnerNode ? false : true; - } - - /** - * Judge if this node is the parent of node node. - * @param node a node - * @return true if this node is the parent of n - */ - private boolean isParent(Node node) { - return node.getNetworkLocation().equals(this.getNetworkFullPath()); - } - - /** - * Add node node to the subtree of this node. - * @param node node to be added - * @return true if the node is added, false if is only updated - */ - public boolean add(Node node) { - if (!isAncestor(node)) { - throw new IllegalArgumentException(node.getNetworkName() - + ", which is located at " + node.getNetworkLocation() - + ", is not a descendant of " + this.getNetworkFullPath()); - } - if (isParent(node)) { - // this node is the parent, then add it directly - node.setParent(this); - node.setLevel(this.getLevel() + 1); - Node current = childrenMap.put(node.getNetworkName(), node); - if (current != null) { - return false; - } - } else { - // find the next level ancestor node - String ancestorName = getNextLevelAncestorName(node); - InnerNode childNode = (InnerNode)childrenMap.get(ancestorName); - if (childNode == null) { - // create a new InnerNode for this ancestor node - childNode = createChildNode(ancestorName); - childrenMap.put(childNode.getNetworkName(), childNode); - } - // add node to the subtree of the next ancestor node - if (!childNode.add(node)) { - return false; - } - } - numOfLeaves++; - return true; - } - - /** - * Remove node node from the subtree of this node. - * @param node node to be deleted - */ - public void remove(Node node) { - if (!isAncestor(node)) { - throw new IllegalArgumentException(node.getNetworkName() - + ", which is located at " + node.getNetworkLocation() - + ", is not a descendant of " + this.getNetworkFullPath()); - } - if (isParent(node)) { - // this node is the parent, remove it directly - if (childrenMap.containsKey(node.getNetworkName())) { - childrenMap.remove(node.getNetworkName()); - node.setParent(null); - } else { - throw new RuntimeException("Should not come to here. Node:" + - node.getNetworkFullPath() + ", Parent:" + - this.getNetworkFullPath()); - } - } else { - // find the next ancestor node - String ancestorName = getNextLevelAncestorName(node); - InnerNodeImpl childNode = (InnerNodeImpl)childrenMap.get(ancestorName); - Preconditions.checkNotNull(childNode, "InnerNode is deleted before leaf"); - // remove node from the parent node - childNode.remove(node); - // if the parent node has no children, remove the parent node too - if (childNode.getNumOfChildren() == 0) { - childrenMap.remove(ancestorName); - } - } - numOfLeaves--; - } - - /** - * Given a node's string representation, return a reference to the node. - * Node can be leaf node or inner node. - * - * @param loc string location of a node. 
If loc starts with "/", it's a - * absolute path, otherwise a relative path. Following examples - * are all accepted, - * 1. /dc1/rm1/rack1 -> an inner node - * 2. /dc1/rm1/rack1/node1 -> a leaf node - * 3. rack1/node1 -> a relative path to this node - * - * @return null if the node is not found - */ - public Node getNode(String loc) { - if (loc == null) { - return null; - } - - String fullPath = this.getNetworkFullPath(); - if (loc.equalsIgnoreCase(fullPath)) { - return this; - } - - // remove current node's location from loc when it's a absolute path - if (fullPath.equals(NetConstants.PATH_SEPARATOR_STR)) { - // current node is ROOT - if (loc.startsWith(PATH_SEPARATOR_STR)) { - loc = loc.substring(1); - } - } else if (loc.startsWith(fullPath)) { - loc = loc.substring(fullPath.length()); - // skip the separator "/" - loc = loc.substring(1); - } - - String[] path = loc.split(PATH_SEPARATOR_STR, 2); - Node child = childrenMap.get(path[0]); - if (child == null) { - return null; - } - if (path.length == 1){ - return child; - } - if (child instanceof InnerNode) { - return ((InnerNode)child).getNode(path[1]); - } else { - return null; - } - } - - /** - * get leafIndex leaf of this subtree. - * - * @param leafIndex an indexed leaf of the node - * @return the leaf node corresponding to the given index. - */ - public Node getLeaf(int leafIndex) { - Preconditions.checkArgument(leafIndex >= 0); - // children are leaves - if (isLeafParent()) { - // range check - if (leafIndex >= getNumOfChildren()) { - return null; - } - return getChildNode(leafIndex); - } else { - for(Node node : childrenMap.values()) { - InnerNodeImpl child = (InnerNodeImpl)node; - int leafCount = child.getNumOfLeaves(); - if (leafIndex < leafCount) { - return child.getLeaf(leafIndex); - } else { - leafIndex -= leafCount; - } - } - return null; - } - } - - /** - * Get leafIndex leaf of this subtree. - * - * @param leafIndex node's index, start from 0, skip the nodes in - * excludedScope and excludedNodes with ancestorGen - * @param excludedScopes the exclude scopes - * @param excludedNodes nodes to be excluded from. If ancestorGen is not 0, - * the chosen node will not share same ancestor with - * those in excluded nodes at the specified generation - * @param ancestorGen apply to excludeNodes, when value is 0, then no same - * ancestor enforcement on excludedNodes - * @return the leaf node corresponding to the given index. - * Example: - * - * / --- root - * / \ - * / \ - * / \ - * / \ - * dc1 dc2 - * / \ / \ - * / \ / \ - * / \ / \ - * rack1 rack2 rack1 rack2 - * / \ / \ / \ / \ - * n1 n2 n3 n4 n5 n6 n7 n8 - * - * Input: - * leafIndex = 2 - * excludedScope = /dc2/rack2 - * excludedNodes = {/dc1/rack1/n1} - * ancestorGen = 1 - * - * Output: - * node /dc1/rack2/n5 - * - * Explanation: - * Since excludedNodes is n1 and ancestorGen is 1, it means nodes under - * /root/dc1/rack1 are excluded. Given leafIndex start from 0, LeafIndex 2 - * means picking the 3th available node, which is n5. 
- * - */ - public Node getLeaf(int leafIndex, List excludedScopes, - Collection excludedNodes, int ancestorGen) { - Preconditions.checkArgument(leafIndex >= 0 && ancestorGen >= 0); - // come to leaf parent layer - if (isLeafParent()) { - return getLeafOnLeafParent(leafIndex, excludedScopes, excludedNodes); - } - - int maxLevel = NodeSchemaManager.getInstance().getMaxLevel(); - // this node's children, it's generation as the ancestor of the leaf node - int currentGen = maxLevel - this.getLevel() - 1; - // build an ancestor(children) to exclude node count map - Map countMap = - getAncestorCountMap(excludedNodes, ancestorGen, currentGen); - // nodes covered by excluded scope - Map excludedNodeCount = - getExcludedScopeNodeCount(excludedScopes); - - for (Node child : childrenMap.values()) { - int leafCount = child.getNumOfLeaves(); - // skip nodes covered by excluded scopes - for (Map.Entry entry: excludedNodeCount.entrySet()) { - if (entry.getKey().startsWith(child.getNetworkFullPath())) { - leafCount -= entry.getValue(); - } - } - // skip nodes covered by excluded nodes and ancestorGen - Integer count = countMap.get(child); - if (count != null) { - leafCount -= count; - } - if (leafIndex < leafCount) { - return ((InnerNode)child).getLeaf(leafIndex, excludedScopes, - excludedNodes, ancestorGen); - } else { - leafIndex -= leafCount; - } - } - return null; - } - - @Override - public boolean equals(Object to) { - if (to == null) { - return false; - } - if (this == to) { - return true; - } - return this.toString().equals(to.toString()); - } - - @Override - public int hashCode() { - return super.hashCode(); - } - - /** - * Get a ancestor to its excluded node count map. - * - * @param nodes a collection of leaf nodes to exclude - * @param genToExclude the ancestor generation to exclude - * @param genToReturn the ancestor generation to return the count map - * @return the map. - * example: - * - * * --- root - * / \ - * * * -- genToReturn =2 - * / \ / \ - * * * * * -- genToExclude = 1 - * /\ /\ /\ /\ - * * * * * * * * * -- nodes - */ - private Map getAncestorCountMap(Collection nodes, - int genToExclude, int genToReturn) { - Preconditions.checkState(genToExclude >= 0); - Preconditions.checkState(genToReturn >= 0); - - if (nodes == null || nodes.size() == 0) { - return Collections.emptyMap(); - } - // with the recursive call, genToReturn can be smaller than genToExclude - if (genToReturn < genToExclude) { - genToExclude = genToReturn; - } - // ancestorToExclude to ancestorToReturn map - HashMap ancestorMap = new HashMap<>(); - for (Node node: nodes) { - Node ancestorToExclude = node.getAncestor(genToExclude); - Node ancestorToReturn = node.getAncestor(genToReturn); - if (ancestorToExclude == null || ancestorToReturn == null) { - LOG.warn("Ancestor not found, node: " + node.getNetworkFullPath() + - ", generation to exclude: " + genToExclude + - ", generation to return:" + genToReturn); - continue; - } - ancestorMap.put(ancestorToExclude, ancestorToReturn); - } - // ancestorToReturn to exclude node count map - HashMap countMap = new HashMap<>(); - for (Map.Entry entry : ancestorMap.entrySet()) { - countMap.compute(entry.getValue(), - (key, n) -> (n == null ? 0 : n) + entry.getKey().getNumOfLeaves()); - } - - return countMap; - } - - /** - * Get the node with leafIndex, considering skip nodes in excludedScope - * and in excludeNodes list. 
- */ - private Node getLeafOnLeafParent(int leafIndex, List excludedScopes, - Collection excludedNodes) { - Preconditions.checkArgument(isLeafParent() && leafIndex >= 0); - if (leafIndex >= getNumOfChildren()) { - return null; - } - for(Node node : childrenMap.values()) { - if (excludedNodes != null && excludedNodes.contains(node)) { - continue; - } - if (excludedScopes != null && excludedScopes.size() > 0) { - if (excludedScopes.stream().anyMatch(scope -> - node.getNetworkFullPath().startsWith(scope))) { - continue; - } - } - if (leafIndex == 0) { - return node; - } - leafIndex--; - } - return null; - } - - /** - * Return child's name of this node which is an ancestor of node n. - */ - private String getNextLevelAncestorName(Node n) { - int parentPathLen = this.getNetworkFullPath().length(); - String name = n.getNetworkLocation().substring(parentPathLen); - if (name.charAt(0) == PATH_SEPARATOR) { - name = name.substring(1); - } - int index = name.indexOf(PATH_SEPARATOR); - if (index != -1) { - name = name.substring(0, index); - } - return name; - } - - /** - * Creates a child node to be added to the list of children. - * @param name The name of the child node - * @return A new inner node - * @see InnerNodeImpl(String, String, InnerNode, int) - */ - private InnerNodeImpl createChildNode(String name) { - int childLevel = this.getLevel() + 1; - int cost = NodeSchemaManager.getInstance().getCost(childLevel); - return new InnerNodeImpl(name, this.getNetworkFullPath(), this, childLevel, - cost); - } - - /** Get node with index index. */ - private Node getChildNode(int index) { - Iterator iterator = childrenMap.values().iterator(); - Node node = null; - while(index >= 0 && iterator.hasNext()) { - node = (Node)iterator.next(); - index--; - } - return node; - } - - /** Get how many leaf nodes are covered by the excludedScopes(no overlap). */ - private Map getExcludedScopeNodeCount( - List excludedScopes) { - HashMap nodeCounts = new HashMap<>(); - if (excludedScopes == null || excludedScopes.isEmpty()) { - return nodeCounts; - } - - for (String scope: excludedScopes) { - Node excludedScopeNode = getNode(scope); - nodeCounts.put(scope, excludedScopeNode == null ? 0 : - excludedScopeNode.getNumOfLeaves()); - } - return nodeCounts; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java deleted file mode 100644 index 0e1b0769446fa..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.hadoop.hdds.scm.net.NodeSchema.LayerType; - -/** - * Class to hold network topology related constants and configurations. - */ -public final class NetConstants { - private NetConstants() { - // Prevent instantiation - } - public final static char PATH_SEPARATOR = '/'; - /** Path separator as a string. */ - public final static String PATH_SEPARATOR_STR = "/"; - public final static String SCOPE_REVERSE_STR = "~"; - /** string representation of root. */ - public final static String ROOT = ""; - public final static int INNER_NODE_COST_DEFAULT = 1; - public final static int NODE_COST_DEFAULT = 0; - public final static int ANCESTOR_GENERATION_DEFAULT = 0; - public final static int ROOT_LEVEL = 1; - public final static String NODE_COST_PREFIX = "$"; - public final static String DEFAULT_RACK = "/default-rack"; - public final static String DEFAULT_NODEGROUP = "/default-nodegroup"; - public final static String DEFAULT_DATACENTER = "/default-datacenter"; - public final static String DEFAULT_REGION = "/default-dataregion"; - - // Build-in network topology node schema - public static final NodeSchema ROOT_SCHEMA = - new NodeSchema.Builder().setType(LayerType.ROOT).build(); - - public static final NodeSchema REGION_SCHEMA = - new NodeSchema.Builder().setType(LayerType.INNER_NODE) - .setDefaultName(DEFAULT_REGION).build(); - - public static final NodeSchema DATACENTER_SCHEMA = - new NodeSchema.Builder().setType(LayerType.INNER_NODE) - .setDefaultName(DEFAULT_DATACENTER).build(); - - public static final NodeSchema RACK_SCHEMA = - new NodeSchema.Builder().setType(LayerType.INNER_NODE) - .setDefaultName(DEFAULT_RACK).build(); - - public static final NodeSchema NODEGROUP_SCHEMA = - new NodeSchema.Builder().setType(LayerType.INNER_NODE) - .setDefaultName(DEFAULT_NODEGROUP).build(); - - public static final NodeSchema LEAF_SCHEMA = - new NodeSchema.Builder().setType(LayerType.LEAF_NODE).build(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java deleted file mode 100644 index 4019b1305f6a8..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.commons.collections.CollectionUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.stream.Collectors; - -/** - * Utility class to facilitate network topology functions. 
- */ -public final class NetUtils { - public static final Logger LOG = LoggerFactory.getLogger(NetUtils.class); - private NetUtils() { - // Prevent instantiation - } - /** - * Normalize a path by stripping off any trailing. - * {@link NetConstants#PATH_SEPARATOR} - * @param path path to normalize. - * @return the normalised path - * If pathis empty or null, then {@link NetConstants#ROOT} is returned - */ - public static String normalize(String path) { - if (path == null || path.length() == 0) { - return NetConstants.ROOT; - } - - if (path.charAt(0) != NetConstants.PATH_SEPARATOR) { - throw new IllegalArgumentException( - "Network Location path does not start with " - + NetConstants.PATH_SEPARATOR_STR + ": " + path); - } - - // Remove any trailing NetConstants.PATH_SEPARATOR - return path.length() == 1 ? path : - path.replaceAll(NetConstants.PATH_SEPARATOR_STR + "+$", ""); - } - - /** - * Given a network topology location string, return its network topology - * depth, E.g. the depth of /dc1/rack1/ng1/node1 is 5. - */ - public static int locationToDepth(String location) { - String newLocation = normalize(location); - return newLocation.equals(NetConstants.PATH_SEPARATOR_STR) ? 1 : - newLocation.split(NetConstants.PATH_SEPARATOR_STR).length; - } - - - /** - * Remove node from mutableExcludedNodes if it's covered by excludedScope. - * Please noted that mutableExcludedNodes content might be changed after the - * function call. - */ - public static void removeDuplicate(NetworkTopology topology, - Collection mutableExcludedNodes, List mutableExcludedScopes, - int ancestorGen) { - if (CollectionUtils.isEmpty(mutableExcludedNodes) || - CollectionUtils.isEmpty(mutableExcludedScopes) || topology == null) { - return; - } - - Iterator iterator = mutableExcludedNodes.iterator(); - while (iterator.hasNext() && (!mutableExcludedScopes.isEmpty())) { - Node node = iterator.next(); - Node ancestor = topology.getAncestor(node, ancestorGen); - if (ancestor == null) { - LOG.warn("Fail to get ancestor generation " + ancestorGen + - " of node :" + node); - continue; - } - // excludedScope is child of ancestor - List duplicateList = mutableExcludedScopes.stream() - .filter(scope -> scope.startsWith(ancestor.getNetworkFullPath())) - .collect(Collectors.toList()); - mutableExcludedScopes.removeAll(duplicateList); - - // ancestor is covered by excludedScope - mutableExcludedScopes.stream().forEach(scope -> { - if (ancestor.getNetworkFullPath().startsWith(scope)) { - // remove exclude node if it's covered by excludedScope - iterator.remove(); - } - }); - } - } - - /** - * Remove node from mutableExcludedNodes if it's not part of scope - * Please noted that mutableExcludedNodes content might be changed after the - * function call. - */ - public static void removeOutscope(Collection mutableExcludedNodes, - String scope) { - if (CollectionUtils.isEmpty(mutableExcludedNodes) || scope == null) { - return; - } - synchronized (mutableExcludedNodes) { - Iterator iterator = mutableExcludedNodes.iterator(); - while (iterator.hasNext()) { - Node next = iterator.next(); - if (!next.getNetworkFullPath().startsWith(scope)) { - iterator.remove(); - } - } - } - } - - /** - * Get a ancestor list for nodes on generation generation. - * - * @param nodes a collection of leaf nodes - * @param generation the ancestor generation - * @return the ancestor list. If no ancestor is found, then a empty list is - * returned. 
- */ - public static List getAncestorList(NetworkTopology topology, - Collection nodes, int generation) { - List ancestorList = new ArrayList<>(); - if (topology == null || CollectionUtils.isEmpty(nodes) || - generation == 0) { - return ancestorList; - } - Iterator iterator = nodes.iterator(); - while (iterator.hasNext()) { - Node node = iterator.next(); - Node ancestor = topology.getAncestor(node, generation); - if (ancestor == null) { - LOG.warn("Fail to get ancestor generation " + generation + - " of node :" + node); - continue; - } - if (!ancestorList.contains(ancestor)) { - ancestorList.add(ancestor); - } - } - return ancestorList; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java deleted file mode 100644 index 3a2c7c0f1a5ce..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java +++ /dev/null @@ -1,229 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import java.util.Collection; -import java.util.List; - -/** - * The interface defines a network topology. - */ -public interface NetworkTopology { - /** Exception for invalid network topology detection. */ - class InvalidTopologyException extends RuntimeException { - private static final long serialVersionUID = 1L; - public InvalidTopologyException(String msg) { - super(msg); - } - } - /** - * Add a leaf node. This will be called when a new datanode is added. - * @param node node to be added; can be null - * @exception IllegalArgumentException if add a node to a leave or node to be - * added is not a leaf - */ - void add(Node node); - - /** - * Remove a node from the network topology. This will be called when a - * existing datanode is removed from the system. - * @param node node to be removed; cannot be null - */ - void remove(Node node); - - /** - * Check if the tree already contains node node. - * @param node a node - * @return true if node is already in the tree; false otherwise - */ - boolean contains(Node node); - - /** - * Compare the direct parent of each node for equality. - * @return true if their parent are the same - */ - boolean isSameParent(Node node1, Node node2); - - /** - * Compare the specified ancestor generation of each node for equality. - * ancestorGen 1 means parent. - * @return true if their specified generation ancestor are equal - */ - boolean isSameAncestor(Node node1, Node node2, int ancestorGen); - - /** - * Get the ancestor for node on generation ancestorGen. - * - * @param node the node to get ancestor - * @param ancestorGen the ancestor generation - * @return the ancestor. 
If no ancestor is found, then null is returned. - */ - Node getAncestor(Node node, int ancestorGen); - - /** - * Return the max level of this topology, start from 1 for ROOT. For example, - * topology like "/rack/node" has the max level '3'. - */ - int getMaxLevel(); - - /** - * Given a string representation of a node, return its reference. - * @param loc a path string representing a node, can be leaf or inner node - * @return a reference to the node; null if the node is not in the tree - */ - Node getNode(String loc); - - /** - * Given a string representation of a InnerNode, return its leaf nodes count. - * @param loc a path-like string representation of a InnerNode - * @return the number of leaf nodes, 0 if it's not an InnerNode or the node - * doesn't exist - */ - int getNumOfLeafNode(String loc); - - /** - * Return the node numbers at level level. - * @param level topology level, start from 1, which means ROOT - * @return the number of nodes on the level - */ - int getNumOfNodes(int level); - - /** - * Randomly choose a node in the scope. - * @param scope range of nodes from which a node will be chosen. If scope - * starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose one from scope. - * @return the chosen node - */ - Node chooseRandom(String scope); - - /** - * Randomly choose a node in the scope, ano not in the exclude scope. - * @param scope range of nodes from which a node will be chosen. cannot start - * with ~ - * @param excludedScopes the chosen nodes cannot be in these ranges. cannot - * starts with ~ - * @return the chosen node - */ - Node chooseRandom(String scope, List excludedScopes); - - /** - * Randomly choose a leaf node from scope. - * - * If scope starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose nodes from scope. - * If excludedNodes is given, choose a node that's not in excludedNodes. - * - * @param scope range of nodes from which a node will be chosen - * @param excludedNodes nodes to be excluded - * - * @return the chosen node - */ - Node chooseRandom(String scope, Collection excludedNodes); - - /** - * Randomly choose a leaf node from scope. - * - * If scope starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose nodes from scope. - * If excludedNodes is given, choose a node that's not in excludedNodes. - * - * @param scope range of nodes from which a node will be chosen - * @param excludedNodes nodes to be excluded from. - * @param ancestorGen matters when excludeNodes is not null. It means the - * ancestor generation that's not allowed to share between chosen node and the - * excludedNodes. For example, if ancestorGen is 1, means chosen node - * cannot share the same parent with excludeNodes. If value is 2, cannot - * share the same grand parent, and so on. If ancestorGen is 0, then no - * effect. - * - * @return the chosen node - */ - Node chooseRandom(String scope, Collection excludedNodes, - int ancestorGen); - - /** - * Randomly choose one node from scope, share the same generation - * ancestor with affinityNode, and exclude nodes in - * excludeScope and excludeNodes. - * - * @param scope range of nodes from which a node will be chosen, cannot start - * with ~ - * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ - * @param excludedNodes nodes to be excluded - * @param affinityNode when not null, the chosen node should share the same - * ancestor with this node at generation ancestorGen. 
- * Ignored when value is null - * @param ancestorGen If 0, then no same generation ancestor enforcement on - * both excludedNodes and affinityNode. If greater than 0, - * then apply to affinityNode(if not null), or apply to - * excludedNodes if affinityNode is null - * @return the chosen node - */ - Node chooseRandom(String scope, List excludedScopes, - Collection excludedNodes, Node affinityNode, int ancestorGen); - - /** - * Choose the node at index index from scope, share the same - * generation ancestor with affinityNode, and exclude nodes in - * excludeScope and excludeNodes. - * - * @param leafIndex node index, exclude nodes in excludedScope and - * excludedNodes - * @param scope range of nodes from which a node will be chosen, cannot start - * with ~ - * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ - * @param excludedNodes nodes to be excluded - * @param affinityNode when not null, the chosen node should share the same - * ancestor with this node at generation ancestorGen. - * Ignored when value is null - * @param ancestorGen If 0, then no same generation ancestor enforcement on - * both excludedNodes and affinityNode. If greater than 0, - * then apply to affinityNode(if not null), or apply to - * excludedNodes if affinityNode is null - * @return the chosen node - */ - Node getNode(int leafIndex, String scope, List excludedScopes, - Collection excludedNodes, Node affinityNode, int ancestorGen); - - /** Return the distance cost between two nodes - * The distance cost from one node to its parent is it's parent's cost - * The distance cost between two nodes is calculated by summing up their - * distances cost to their closest common ancestor. - * @param node1 one node - * @param node2 another node - * @return the distance cost between node1 and node2 which is zero if they - * are the same or {@link Integer#MAX_VALUE} if node1 or node2 do not belong - * to the cluster - */ - int getDistanceCost(Node node1, Node node2); - - /** - * Sort nodes array by network distance to reader to reduces network - * traffic and improves performance. - * - * As an additional twist, we also randomize the nodes at each network - * distance. This helps with load balancing when there is data skew. - * - * @param reader Node where need the data - * @param nodes Available replicas with the requested data - * @param activeLen Number of active nodes at the front of the array - */ - List sortByDistanceCost(Node reader, - List nodes, int activeLen); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java deleted file mode 100644 index 579e5f71c7913..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ /dev/null @@ -1,798 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import org.apache.commons.collections.CollectionUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.TreeMap; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; - -import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; -import static org.apache.hadoop.hdds.scm.net.NetConstants.SCOPE_REVERSE_STR; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ANCESTOR_GENERATION_DEFAULT; - -/** - * The class represents a cluster of computers with a tree hierarchical - * network topology. In the network topology, leaves represent data nodes - * (computers) and inner nodes represent datacenter/core-switches/routers that - * manages traffic in/out of data centers or racks. - */ -public class NetworkTopologyImpl implements NetworkTopology{ - public static final Logger LOG = - LoggerFactory.getLogger(NetworkTopology.class); - - /** The Inner node crate factory. */ - private final InnerNode.Factory factory; - /** The root cluster tree. */ - private final InnerNode clusterTree; - /** Depth of all leaf nodes. */ - private final int maxLevel; - /** Schema manager. */ - private final NodeSchemaManager schemaManager; - /** Lock to coordinate cluster tree access. */ - private ReadWriteLock netlock = new ReentrantReadWriteLock(true); - - public NetworkTopologyImpl(Configuration conf) { - schemaManager = NodeSchemaManager.getInstance(); - schemaManager.init(conf); - maxLevel = schemaManager.getMaxLevel(); - factory = InnerNodeImpl.FACTORY; - clusterTree = factory.newInnerNode(ROOT, null, null, - NetConstants.ROOT_LEVEL, - schemaManager.getCost(NetConstants.ROOT_LEVEL)); - } - - @VisibleForTesting - public NetworkTopologyImpl(NodeSchemaManager manager) { - schemaManager = manager; - maxLevel = schemaManager.getMaxLevel(); - factory = InnerNodeImpl.FACTORY; - clusterTree = factory.newInnerNode(ROOT, null, null, - NetConstants.ROOT_LEVEL, - schemaManager.getCost(NetConstants.ROOT_LEVEL)); - } - - /** - * Add a leaf node. This will be called when a new datanode is added. 
- * @param node node to be added; can be null - * @exception IllegalArgumentException if add a node to a leave or node to be - * added is not a leaf - */ - public void add(Node node) { - Preconditions.checkArgument(node != null, "node cannot be null"); - if (node instanceof InnerNode) { - throw new IllegalArgumentException( - "Not allowed to add an inner node: "+ node.getNetworkFullPath()); - } - int newDepth = NetUtils.locationToDepth(node.getNetworkLocation()) + 1; - - // Check depth - if (maxLevel != newDepth) { - throw new InvalidTopologyException("Failed to add " + - node.getNetworkFullPath() + ": Its path depth is not " + maxLevel); - } - netlock.writeLock().lock(); - boolean add; - try { - add = clusterTree.add(node); - }finally { - netlock.writeLock().unlock(); - } - - if (add) { - LOG.info("Added a new node: " + node.getNetworkFullPath()); - if (LOG.isDebugEnabled()) { - LOG.debug("NetworkTopology became:\n{}", this); - } - } - } - - /** - * Remove a node from the network topology. This will be called when a - * existing datanode is removed from the system. - * @param node node to be removed; cannot be null - */ - public void remove(Node node) { - Preconditions.checkArgument(node != null, "node cannot be null"); - if (node instanceof InnerNode) { - throw new IllegalArgumentException( - "Not allowed to remove an inner node: "+ node.getNetworkFullPath()); - } - netlock.writeLock().lock(); - try { - clusterTree.remove(node); - }finally { - netlock.writeLock().unlock(); - } - LOG.info("Removed a node: " + node.getNetworkFullPath()); - if (LOG.isDebugEnabled()) { - LOG.debug("NetworkTopology became:\n{}", this); - } - } - - /** - * Check if the tree already contains node node. - * @param node a node - * @return true if node is already in the tree; false otherwise - */ - public boolean contains(Node node) { - Preconditions.checkArgument(node != null, "node cannot be null"); - netlock.readLock().lock(); - try { - Node parent = node.getParent(); - while (parent != null && parent != clusterTree) { - parent = parent.getParent(); - } - if (parent == clusterTree) { - return true; - } - } finally { - netlock.readLock().unlock(); - } - return false; - } - - /** - * Compare the specified ancestor generation of each node for equality. - * @return true if their specified generation ancestor are equal - */ - public boolean isSameAncestor(Node node1, Node node2, int ancestorGen) { - if (node1 == null || node2 == null || ancestorGen <= 0) { - return false; - } - netlock.readLock().lock(); - try { - return node1.getAncestor(ancestorGen) == node2.getAncestor(ancestorGen); - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Compare the direct parent of each node for equality. - * @return true if their parent are the same - */ - public boolean isSameParent(Node node1, Node node2) { - if (node1 == null || node2 == null) { - return false; - } - netlock.readLock().lock(); - try { - node1 = node1.getParent(); - node2 = node2.getParent(); - return node1 == node2; - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Get the ancestor for node on generation ancestorGen. - * - * @param node the node to get ancestor - * @param ancestorGen the ancestor generation - * @return the ancestor. If no ancestor is found, then null is returned. 
- */ - public Node getAncestor(Node node, int ancestorGen) { - if (node == null) { - return null; - } - netlock.readLock().lock(); - try { - return node.getAncestor(ancestorGen); - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Given a string representation of a node(leaf or inner), return its - * reference. - * @param loc a path string representing a node, can be leaf or inner node - * @return a reference to the node, null if the node is not in the tree - */ - public Node getNode(String loc) { - loc = NetUtils.normalize(loc); - netlock.readLock().lock(); - try { - if (!ROOT.equals(loc)) { - return clusterTree.getNode(loc); - } else { - return clusterTree; - } - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Given a string representation of Node, return its leaf nodes count. - * @param loc a path-like string representation of Node - * @return the number of leaf nodes for InnerNode, 1 for leaf node, 0 if node - * doesn't exist - */ - public int getNumOfLeafNode(String loc) { - netlock.readLock().lock(); - try { - Node node = getNode(loc); - if (node != null) { - return node.getNumOfLeaves(); - } - } finally { - netlock.readLock().unlock(); - } - return 0; - } - - /** - * Return the max level of this tree, start from 1 for ROOT. For example, - * topology like "/rack/node" has the max level '3'. - */ - public int getMaxLevel() { - return maxLevel; - } - - /** - * Return the node numbers at level level. - * @param level topology level, start from 1, which means ROOT - * @return the number of nodes on the level - */ - public int getNumOfNodes(int level) { - Preconditions.checkArgument(level > 0 && level <= maxLevel, - "Invalid level"); - netlock.readLock().lock(); - try { - return clusterTree.getNumOfNodes(level); - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Randomly choose a node in the scope. - * @param scope range of nodes from which a node will be chosen. If scope - * starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose one from scope. - * @return the chosen node - */ - public Node chooseRandom(String scope) { - if (scope == null) { - scope = ROOT; - } - if (scope.startsWith(SCOPE_REVERSE_STR)) { - ArrayList excludedScopes = new ArrayList(); - excludedScopes.add(scope.substring(1)); - return chooseRandom(ROOT, excludedScopes, null, null, - ANCESTOR_GENERATION_DEFAULT); - } else { - return chooseRandom(scope, null, null, null, ANCESTOR_GENERATION_DEFAULT); - } - } - - /** - * Randomly choose a node in the scope, ano not in the exclude scope. - * @param scope range of nodes from which a node will be chosen. cannot start - * with ~ - * @param excludedScopes the chosen node cannot be in these ranges. cannot - * starts with ~ - * @return the chosen node - */ - public Node chooseRandom(String scope, List excludedScopes) { - return chooseRandom(scope, excludedScopes, null, null, - ANCESTOR_GENERATION_DEFAULT); - } - - /** - * Randomly choose a leaf node from scope. - * - * If scope starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose nodes from scope. - * If excludedNodes is given, choose a node that's not in excludedNodes. 
- * - * @param scope range of nodes from which a node will be chosen - * @param excludedNodes nodes to be excluded - * - * @return the chosen node - */ - public Node chooseRandom(String scope, Collection excludedNodes) { - if (scope == null) { - scope = ROOT; - } - if (scope.startsWith(SCOPE_REVERSE_STR)) { - ArrayList excludedScopes = new ArrayList(); - excludedScopes.add(scope.substring(1)); - return chooseRandom(ROOT, excludedScopes, excludedNodes, null, - ANCESTOR_GENERATION_DEFAULT); - } else { - return chooseRandom(scope, null, excludedNodes, null, - ANCESTOR_GENERATION_DEFAULT); - } - } - - /** - * Randomly choose a leaf node from scope. - * - * If scope starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose nodes from scope. - * If excludedNodes is given, choose a node that's not in excludedNodes. - * - * @param scope range of nodes from which a node will be chosen - * @param excludedNodes nodes to be excluded from. - * @param ancestorGen matters when excludeNodes is not null. It means the - * ancestor generation that's not allowed to share between chosen node and the - * excludedNodes. For example, if ancestorGen is 1, means chosen node - * cannot share the same parent with excludeNodes. If value is 2, cannot - * share the same grand parent, and so on. If ancestorGen is 0, then no - * effect. - * - * @return the chosen node - */ - public Node chooseRandom(String scope, Collection excludedNodes, - int ancestorGen) { - if (scope == null) { - scope = ROOT; - } - if (scope.startsWith(SCOPE_REVERSE_STR)) { - ArrayList excludedScopes = new ArrayList(); - excludedScopes.add(scope.substring(1)); - return chooseRandom(ROOT, excludedScopes, excludedNodes, null, - ancestorGen); - } else { - return chooseRandom(scope, null, excludedNodes, null, ancestorGen); - } - } - - /** - * Randomly choose one leaf node from scope, share the same generation - * ancestor with affinityNode, and exclude nodes in - * excludeScope and excludeNodes. - * - * @param scope range of nodes from which a node will be chosen, cannot start - * with ~ - * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ - * @param excludedNodes nodes to be excluded - * @param affinityNode when not null, the chosen node should share the same - * ancestor with this node at generation ancestorGen. - * Ignored when value is null - * @param ancestorGen If 0, then no same generation ancestor enforcement on - * both excludedNodes and affinityNode. If greater than 0, - * then apply to affinityNode(if not null), or apply to - * excludedNodes if affinityNode is null - * @return the chosen node - */ - public Node chooseRandom(String scope, List excludedScopes, - Collection excludedNodes, Node affinityNode, int ancestorGen) { - if (scope == null) { - scope = ROOT; - } - - checkScope(scope); - checkExcludedScopes(excludedScopes); - checkAffinityNode(affinityNode); - checkAncestorGen(ancestorGen); - - netlock.readLock().lock(); - try { - return chooseNodeInternal(scope, -1, excludedScopes, - excludedNodes, affinityNode, ancestorGen); - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Choose the leaf node at index index from scope, share the - * same generation ancestor with affinityNode, and exclude nodes in - * excludeScope and excludeNodes. 
- * - * @param leafIndex node index, exclude nodes in excludedScope and - * excludedNodes - * @param scope range of nodes from which a node will be chosen, cannot start - * with ~ - * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ - * @param excludedNodes nodes to be excluded - * @param affinityNode when not null, the chosen node should share the same - * ancestor with this node at generation ancestorGen. - * Ignored when value is null - * @param ancestorGen If 0, then no same generation ancestor enforcement on - * both excludedNodes and affinityNode. If greater than 0, - * then apply to affinityNode(if not null), or apply to - * excludedNodes if affinityNode is null - * @return the chosen node - * Example: - * - * / --- root - * / \ - * / \ - * / \ - * / \ - * dc1 dc2 - * / \ / \ - * / \ / \ - * / \ / \ - * rack1 rack2 rack1 rack2 - * / \ / \ / \ / \ - * n1 n2 n3 n4 n5 n6 n7 n8 - * - * Input: - * leafIndex = 1 - * excludedScope = /dc2 - * excludedNodes = {/dc1/rack1/n1} - * affinityNode = /dc1/rack2/n2 - * ancestorGen = 2 - * - * Output: - * node /dc1/rack2/n4 - * - * Explanation: - * With affinityNode n2 and ancestorGen 2, it means we can only pick node - * from subtree /dc1. LeafIndex 1, so we pick the 2nd available node n4. - * - */ - public Node getNode(int leafIndex, String scope, List excludedScopes, - Collection excludedNodes, Node affinityNode, int ancestorGen) { - Preconditions.checkArgument(leafIndex >= 0); - if (scope == null) { - scope = ROOT; - } - checkScope(scope); - checkExcludedScopes(excludedScopes); - checkAffinityNode(affinityNode); - checkAncestorGen(ancestorGen); - - netlock.readLock().lock(); - try { - return chooseNodeInternal(scope, leafIndex, excludedScopes, - excludedNodes, affinityNode, ancestorGen); - } finally { - netlock.readLock().unlock(); - } - } - - private Node chooseNodeInternal(String scope, int leafIndex, - List excludedScopes, Collection excludedNodes, - Node affinityNode, int ancestorGen) { - Preconditions.checkArgument(scope != null); - - String finalScope = scope; - if (affinityNode != null && ancestorGen > 0) { - Node affinityAncestor = affinityNode.getAncestor(ancestorGen); - if (affinityAncestor == null) { - throw new IllegalArgumentException("affinityNode " + - affinityNode.getNetworkFullPath() + " doesn't have ancestor on" + - " generation " + ancestorGen); - } - // affinity ancestor should has overlap with scope - if (affinityAncestor.getNetworkFullPath().startsWith(scope)){ - finalScope = affinityAncestor.getNetworkFullPath(); - } else if (!scope.startsWith(affinityAncestor.getNetworkFullPath())) { - return null; - } - // reset ancestor generation since the new scope is identified now - ancestorGen = 0; - } - - // check overlap of excludedScopes and finalScope - List mutableExcludedScopes = null; - if (excludedScopes != null && !excludedScopes.isEmpty()) { - mutableExcludedScopes = new ArrayList<>(); - for (String s: excludedScopes) { - // excludeScope covers finalScope - if (finalScope.startsWith(s)) { - return null; - } - // excludeScope and finalScope share nothing case - if (s.startsWith(finalScope)) { - if (!mutableExcludedScopes.stream().anyMatch( - e -> s.startsWith(e))) { - mutableExcludedScopes.add(s); - } - } - } - } - - // clone excludedNodes before remove duplicate in it - Collection mutableExNodes = null; - - // Remove duplicate in excludedNodes - if (excludedNodes != null) { - mutableExNodes = - excludedNodes.stream().distinct().collect(Collectors.toList()); - } - - // remove duplicate in 
mutableExNodes and mutableExcludedScopes - NetUtils.removeDuplicate(this, mutableExNodes, mutableExcludedScopes, - ancestorGen); - - // calculate available node count - Node scopeNode = getNode(finalScope); - int availableNodes = getAvailableNodesCount( - scopeNode.getNetworkFullPath(), mutableExcludedScopes, mutableExNodes, - ancestorGen); - - if (availableNodes <= 0) { - LOG.warn("No available node in (scope=\"{}\" excludedScope=\"{}\" " + - "excludedNodes=\"{}\" ancestorGen=\"{}\").", - scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes, - ancestorGen); - return null; - } - - // scope is a Leaf node - if (!(scopeNode instanceof InnerNode)) { - return scopeNode; - } - - Node ret; - int nodeIndex; - if (leafIndex >= 0) { - nodeIndex = leafIndex % availableNodes; - ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, mutableExcludedScopes, - mutableExNodes, ancestorGen); - } else { - nodeIndex = ThreadLocalRandom.current().nextInt(availableNodes); - ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, mutableExcludedScopes, - mutableExNodes, ancestorGen); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Choosing node[index={},random={}] from \"{}\" available " + - "nodes, scope=\"{}\", excludedScope=\"{}\", excludeNodes=\"{}\".", - nodeIndex, (leafIndex == -1 ? "true" : "false"), availableNodes, - scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes); - LOG.debug("Chosen node = {}", (ret == null ? "not found" : - ret.toString())); - } - return ret; - } - - /** Return the distance cost between two nodes - * The distance cost from one node to its parent is it's parent's cost - * The distance cost between two nodes is calculated by summing up their - * distances cost to their closest common ancestor. - * @param node1 one node - * @param node2 another node - * @return the distance cost between node1 and node2 which is zero if they - * are the same or {@link Integer#MAX_VALUE} if node1 or node2 do not belong - * to the cluster - */ - public int getDistanceCost(Node node1, Node node2) { - if ((node1 != null && node2 != null && node1.equals(node2)) || - (node1 == null && node2 == null)) { - return 0; - } - if (node1 == null || node2 == null) { - LOG.warn("One of the nodes is a null pointer"); - return Integer.MAX_VALUE; - } - int cost = 0; - netlock.readLock().lock(); - try { - if ((node1.getAncestor(maxLevel - 1) != clusterTree) || - (node2.getAncestor(maxLevel - 1) != clusterTree)) { - LOG.debug("One of the nodes is outside of network topology"); - return Integer.MAX_VALUE; - } - int level1 = node1.getLevel(); - int level2 = node2.getLevel(); - if (level1 > maxLevel || level2 > maxLevel) { - return Integer.MAX_VALUE; - } - while(level1 > level2 && node1 != null) { - node1 = node1.getParent(); - level1--; - cost += node1 == null? 0 : node1.getCost(); - } - while(level2 > level1 && node2 != null) { - node2 = node2.getParent(); - level2--; - cost += node2 == null? 0 : node2.getCost(); - } - while(node1 != null && node2 != null && node1 != node2) { - node1 = node1.getParent(); - node2 = node2.getParent(); - cost += node1 == null? 0 : node1.getCost(); - cost += node2 == null? 0 : node2.getCost(); - } - return cost; - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Sort nodes array by network distance to reader to reduces network - * traffic and improves performance. - * - * As an additional twist, we also randomize the nodes at each network - * distance. This helps with load balancing when there is data skew. 
- * - * @param reader Node where need the data - * @param nodes Available replicas with the requested data - * @param activeLen Number of active nodes at the front of the array - */ - public List sortByDistanceCost(Node reader, - List nodes, int activeLen) { - /** Sort weights for the nodes array */ - if (reader == null) { - return nodes; - } - int[] costs = new int[activeLen]; - for (int i = 0; i < activeLen; i++) { - costs[i] = getDistanceCost(reader, nodes.get(i)); - } - // Add cost/node pairs to a TreeMap to sort - TreeMap> tree = new TreeMap>(); - for (int i = 0; i < activeLen; i++) { - int cost = costs[i]; - Node node = nodes.get(i); - List list = tree.get(cost); - if (list == null) { - list = Lists.newArrayListWithExpectedSize(1); - tree.put(cost, list); - } - list.add(node); - } - - List ret = new ArrayList<>(); - for (List list: tree.values()) { - if (list != null) { - Collections.shuffle(list); - for (Node n: list) { - ret.add(n); - } - } - } - - Preconditions.checkState(ret.size() == activeLen, - "Wrong number of nodes sorted!"); - return ret; - } - - /** - * Return the number of leaves in scope but not in - * excludedNodes and excludeScope. - * @param scope the scope - * @param excludedScopes excluded scopes - * @param mutableExcludedNodes a list of excluded nodes, content might be - * changed after the call - * @param ancestorGen same generation ancestor prohibit on excludedNodes - * @return number of available nodes - */ - private int getAvailableNodesCount(String scope, List excludedScopes, - Collection mutableExcludedNodes, int ancestorGen) { - Preconditions.checkArgument(scope != null); - - Node scopeNode = getNode(scope); - if (scopeNode == null) { - return 0; - } - NetUtils.removeOutscope(mutableExcludedNodes, scope); - List excludedAncestorList = - NetUtils.getAncestorList(this, mutableExcludedNodes, ancestorGen); - for (Node ancestor : excludedAncestorList) { - if (scope.startsWith(ancestor.getNetworkFullPath())){ - return 0; - } - } - // number of nodes to exclude - int excludedCount = 0; - if (excludedScopes != null) { - for (String excludedScope: excludedScopes) { - Node excludedScopeNode = getNode(excludedScope); - if (excludedScopeNode != null) { - if (excludedScope.startsWith(scope)) { - excludedCount += excludedScopeNode.getNumOfLeaves(); - } else if (scope.startsWith(excludedScope)) { - return 0; - } - } - } - } - // excludedNodes is not null case - if (mutableExcludedNodes != null && (!mutableExcludedNodes.isEmpty())) { - if (ancestorGen == 0) { - for (Node node: mutableExcludedNodes) { - if (contains(node)) { - excludedCount++; - } - } - } else { - for (Node ancestor : excludedAncestorList) { - if (ancestor.getNetworkFullPath().startsWith(scope)) { - excludedCount += ancestor.getNumOfLeaves(); - } - } - } - } - - int availableCount = scopeNode.getNumOfLeaves() - excludedCount; - Preconditions.checkState(availableCount >= 0); - return availableCount; - } - - @Override - public String toString() { - // print max level - StringBuilder tree = new StringBuilder(); - tree.append("Level: "); - tree.append(maxLevel); - tree.append("\n"); - netlock.readLock().lock(); - try { - // print the number of leaves - int numOfLeaves = clusterTree.getNumOfLeaves(); - tree.append("Number of leaves:"); - tree.append(numOfLeaves); - tree.append("\n"); - // print all nodes - for (int i = 0; i < numOfLeaves; i++) { - tree.append(clusterTree.getLeaf(i).getNetworkFullPath()); - tree.append("\n"); - } - } finally { - netlock.readLock().unlock(); - } - return tree.toString(); - } - - 
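Reviewer note: the getDistanceCost() and sortByDistanceCost() methods removed above compute a distance by summing the cost of every ancestor crossed on the way to the closest common ancestor of the two nodes. The stand-alone sketch below reproduces only that summation so the deleted logic is easy to verify; SimpleNode and DistanceCostSketch are illustrative stand-ins, not the removed Node/InnerNode types, and the sketch assumes both nodes belong to the same tree (the removed code additionally guards against nulls and nodes outside the topology).

// Illustrative only: mirrors the cost summation in the removed getDistanceCost().
final class DistanceCostSketch {

  static final class SimpleNode {
    final String name;
    final SimpleNode parent;
    final int cost;   // cost charged when traffic crosses this node
    final int level;  // root is level 1, matching the removed topology

    SimpleNode(String name, SimpleNode parent, int cost) {
      this.name = name;
      this.parent = parent;
      this.cost = cost;
      this.level = (parent == null) ? 1 : parent.level + 1;
    }
  }

  /** Sum the costs of the nodes crossed up to the closest common ancestor. */
  static int distanceCost(SimpleNode a, SimpleNode b) {
    if (a == b) {
      return 0;
    }
    int cost = 0;
    // Walk the deeper node up until both sit on the same level.
    while (a.level > b.level) {
      a = a.parent;
      cost += a.cost;
    }
    while (b.level > a.level) {
      b = b.parent;
      cost += b.cost;
    }
    // Walk both up together until they meet at the common ancestor.
    while (a != b) {
      a = a.parent;
      b = b.parent;
      cost += a.cost + b.cost;
    }
    return cost;
  }

  public static void main(String[] args) {
    SimpleNode root = new SimpleNode("/", null, 1);
    SimpleNode dc1 = new SimpleNode("dc1", root, 1);
    SimpleNode rack1 = new SimpleNode("rack1", dc1, 1);
    SimpleNode rack2 = new SimpleNode("rack2", dc1, 1);
    SimpleNode n1 = new SimpleNode("n1", rack1, 0);
    SimpleNode n3 = new SimpleNode("n3", rack2, 0);
    // Prints 4: each side pays its rack (1) plus the shared ancestor dc1 (1).
    System.out.println(distanceCost(n1, n3));
  }
}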
private void checkScope(String scope) { - if (scope != null && scope.startsWith(SCOPE_REVERSE_STR)) { - throw new IllegalArgumentException("scope " + scope + - " should not start with " + SCOPE_REVERSE_STR); - } - } - - private void checkExcludedScopes(List excludedScopes) { - if (!CollectionUtils.isEmpty(excludedScopes)) { - excludedScopes.stream().forEach(scope -> { - if (scope.startsWith(SCOPE_REVERSE_STR)) { - throw new IllegalArgumentException("excludedScope " + scope + - " cannot start with " + SCOPE_REVERSE_STR); - } - }); - } - } - - private void checkAffinityNode(Node affinityNode) { - if (affinityNode != null && (!contains(affinityNode))) { - throw new IllegalArgumentException("Affinity node " + - affinityNode.getNetworkFullPath() + " is not a member of topology"); - } - } - - private void checkAncestorGen(int ancestorGen) { - if (ancestorGen > (maxLevel - 1) || ancestorGen < 0) { - throw new IllegalArgumentException("ancestorGen " + ancestorGen + - " exceeds this network topology acceptable level [0, " + - (maxLevel - 1) + "]"); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java deleted file mode 100644 index 0007e546770f0..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -/** - * The interface defines a node in a network topology. - * A node may be a leave representing a data node or an inner - * node representing a data center or rack. - * Each node has a name and its location in the network is - * decided by a string with syntax similar to a file name. - * For example, a data node's name is hostname:port# and if it's located at - * rack "orange" in data center "dog", the string representation of its - * network location will be /dog/orange. - */ -public interface Node { - /** @return the string representation of this node's network location path, - * exclude itself. In another words, its parent's full network location */ - String getNetworkLocation(); - - /** - * Set this node's network location. - * @param location it's network location - */ - void setNetworkLocation(String location); - - /** @return this node's self name in network topology. This should be node's - * IP or hostname. - * */ - String getNetworkName(); - - /** - * Set this node's name, can be hostname or Ipaddress. - * @param name it's network name - */ - void setNetworkName(String name); - - /** @return this node's full path in network topology. It's the concatenation - * of location and name. 
- * */ - String getNetworkFullPath(); - - /** @return this node's parent */ - InnerNode getParent(); - - /** - * Set this node's parent. - * @param parent the parent - */ - void setParent(InnerNode parent); - - /** @return this node's ancestor, generation 0 is itself, generation 1 is - * node's parent, and so on.*/ - Node getAncestor(int generation); - - /** - * @return this node's level in the tree. - * E.g. the root of a tree returns 1 and root's children return 2 - */ - int getLevel(); - - /** - * Set this node's level in the tree. - * @param i the level - */ - void setLevel(int i); - - /** - * @return this node's cost when network traffic go through it. - * E.g. the cost of going cross a switch is 1, and cost of going through a - * datacenter can be 5. - * Be default the cost of leaf datanode is 0, all other node is 1. - */ - int getCost(); - - /** @return the leaf nodes number under this node. */ - int getNumOfLeaves(); - - /** - * Judge if this node is an ancestor of node n. - * Ancestor includes itself and parents case. - * - * @param n a node - * @return true if this node is an ancestor of n - */ - boolean isAncestor(Node n); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java deleted file mode 100644 index 53b05ea294166..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java +++ /dev/null @@ -1,222 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import com.google.common.base.Preconditions; - -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; -import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; - -/** - * A thread safe class that implements interface Node. - */ -public class NodeImpl implements Node { - // host:port# - private String name; - // string representation of this node's location, such as /dc1/rack1 - private String location; - // location + "/" + name - private String path; - // which level of the tree the node resides, start from 1 for root - private int level; - // node's parent - private InnerNode parent; - // the cost to go through this node - private final int cost; - - /** - * Construct a node from its name and its location. - * @param name this node's name (can be null, must not contain - * {@link NetConstants#PATH_SEPARATOR}) - * @param location this node's location - */ - public NodeImpl(String name, String location, int cost) { - if (name != null && name.contains(PATH_SEPARATOR_STR)) { - throw new IllegalArgumentException( - "Network location name:" + name + " should not contain " + - PATH_SEPARATOR_STR); - } - this.name = (name == null) ? 
ROOT : name; - this.location = NetUtils.normalize(location); - this.path = getPath(); - this.cost = cost; - } - - /** - * Construct a node from its name and its location. - * - * @param name this node's name (can be null, must not contain - * {@link NetConstants#PATH_SEPARATOR}) - * @param location this node's location - * @param parent this node's parent node - * @param level this node's level in the tree - * @param cost this node's cost if traffic goes through it - */ - public NodeImpl(String name, String location, InnerNode parent, int level, - int cost) { - this(name, location, cost); - this.parent = parent; - this.level = level; - } - - /** - * @return this node's name - */ - public String getNetworkName() { - return name; - } - - /** - * Set this node's name, can be hostname or Ipaddress. - * @param networkName it's network name - */ - public void setNetworkName(String networkName) { - this.name = networkName; - this.path = getPath(); - } - - /** - * @return this node's network location - */ - public String getNetworkLocation() { - return location; - } - - /** - * Set this node's network location. - * @param networkLocation it's network location - */ - @Override - public void setNetworkLocation(String networkLocation) { - this.location = networkLocation; - this.path = getPath(); - } - - /** - * @return this node's full path in network topology. It's the concatenation - * of location and name. - */ - public String getNetworkFullPath() { - return path; - } - - /** - * @return this node's parent - */ - public InnerNode getParent() { - return parent; - } - - /** - * @return this node's ancestor, generation 0 is itself, generation 1 is - * node's parent, and so on. - */ - public Node getAncestor(int generation) { - Preconditions.checkArgument(generation >= 0); - Node current = this; - while (generation > 0 && current != null) { - current = current.getParent(); - generation--; - } - return current; - } - - /** - * Set this node's parent. - * - * @param parent the parent - */ - public void setParent(InnerNode parent) { - this.parent = parent; - } - - /** - * @return this node's level in the tree. - * E.g. the root of a tree returns 0 and its children return 1 - */ - public int getLevel() { - return this.level; - } - - /** - * Set this node's level in the tree. - * - * @param level the level - */ - public void setLevel(int level) { - this.level = level; - } - - /** - * @return this node's cost when network traffic go through it. - * E.g. the cost of going cross a switch is 1, and cost of going through a - * datacenter is 5. - * Be default the cost of leaf datanode is 0, all other inner node is 1. - */ - public int getCost() { - return this.cost; - } - - /** @return the leaf nodes number under this node. */ - public int getNumOfLeaves() { - return 1; - } - - /** - * Check if this node is an ancestor of node node. 
Ancestor includes - * itself and parents case; - * @param node a node - * @return true if this node is an ancestor of node - */ - public boolean isAncestor(Node node) { - return this.getNetworkFullPath().equals(PATH_SEPARATOR_STR) || - node.getNetworkLocation().startsWith(this.getNetworkFullPath()) || - node.getNetworkFullPath().equalsIgnoreCase( - this.getNetworkFullPath()); - } - - @Override - public boolean equals(Object to) { - if (to == null) { - return false; - } - if (this == to) { - return true; - } - return this.toString().equals(to.toString()); - } - - @Override - public int hashCode() { - return toString().hashCode(); - } - - /** - * @return this node's path as its string representation - */ - @Override - public String toString() { - return getNetworkFullPath(); - } - - private String getPath() { - return this.location.equals(PATH_SEPARATOR_STR) ? - this.location + this.name : - this.location + PATH_SEPARATOR_STR + this.name; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java deleted file mode 100644 index 47e5de880d6d7..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java +++ /dev/null @@ -1,183 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.hadoop.HadoopIllegalArgumentException; - -import java.util.List; - -/** - * Network topology schema to housekeeper relevant information. - */ -public final class NodeSchema { - /** - * Network topology layer type enum definition. - */ - public enum LayerType{ - ROOT("Root", NetConstants.INNER_NODE_COST_DEFAULT), - INNER_NODE("InnerNode", NetConstants.INNER_NODE_COST_DEFAULT), - LEAF_NODE("Leaf", NetConstants.NODE_COST_DEFAULT); - - private final String description; - // default cost - private final int cost; - - LayerType(String description, int cost) { - this.description = description; - this.cost = cost; - } - - @Override - public String toString() { - return description; - } - - public int getCost(){ - return cost; - } - public static LayerType getType(String typeStr) { - for (LayerType type: LayerType.values()) { - if (typeStr.equalsIgnoreCase(type.toString())) { - return type; - } - } - return null; - } - } - - // default cost - private int cost; - // layer Type, mandatory property - private LayerType type; - // default name, can be null or "" - private String defaultName; - // layer prefix, can be null or "" - private String prefix; - // sublayer - private List sublayer; - - /** - * Builder for NodeSchema. 
- */ - public static class Builder { - private int cost = -1; - private LayerType type; - private String defaultName; - private String prefix; - - public Builder setCost(int nodeCost) { - this.cost = nodeCost; - return this; - } - - public Builder setPrefix(String nodePrefix) { - this.prefix = nodePrefix; - return this; - } - - public Builder setType(LayerType nodeType) { - this.type = nodeType; - return this; - } - - public Builder setDefaultName(String nodeDefaultName) { - this.defaultName = nodeDefaultName; - return this; - } - - public NodeSchema build() { - if (type == null) { - throw new HadoopIllegalArgumentException("Type is mandatory for a " + - "network topology node layer definition"); - } - if (cost == -1) { - cost = type.getCost(); - } - return new NodeSchema(type, cost, prefix, defaultName); - } - } - - /** - * Constructor. - * @param type layer type - * @param cost layer's default cost - * @param prefix layer's prefix - * @param defaultName layer's default name is if specified - */ - public NodeSchema(LayerType type, int cost, String prefix, - String defaultName) { - this.type = type; - this.cost = cost; - this.prefix = prefix; - this.defaultName = defaultName; - } - - /** - * Constructor. This constructor is only used when build NodeSchema from - * YAML file. - */ - public NodeSchema() { - this.type = LayerType.INNER_NODE; - } - - public boolean matchPrefix(String name) { - if (name == null || name.isEmpty() || prefix == null || prefix.isEmpty()) { - return false; - } - return name.trim().toLowerCase().startsWith(prefix.toLowerCase()); - } - - public LayerType getType() { - return this.type; - } - - public void setType(LayerType type) { - this.type = type; - } - - public String getPrefix() { - return this.prefix; - } - - public void setPrefix(String prefix) { - this.prefix = prefix; - } - - public String getDefaultName() { - return this.defaultName; - } - - public void setDefaultName(String name) { - this.defaultName = name; - } - - public int getCost() { - return this.cost; - } - public void setCost(int cost) { - this.cost = cost; - } - - public void setSublayer(List sublayer) { - this.sublayer = sublayer; - } - - public List getSublayer() { - return sublayer; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java deleted file mode 100644 index 8d7abedf2e74a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java +++ /dev/null @@ -1,489 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.commons.io.FilenameUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; -import org.w3c.dom.Text; -import org.xml.sax.SAXException; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hdds.scm.net.NodeSchema.LayerType; -import org.yaml.snakeyaml.Yaml; - -/** - * A Network topology layer schema loading tool that loads user defined network - * layer schema data from a XML configuration file. - */ -public final class NodeSchemaLoader { - private static final Logger LOG - = LoggerFactory.getLogger(NodeSchemaLoader.class); - private static final String CONFIGURATION_TAG = "configuration"; - private static final String LAYOUT_VERSION_TAG = "layoutversion"; - private static final String TOPOLOGY_TAG = "topology"; - private static final String TOPOLOGY_PATH = "path"; - private static final String TOPOLOGY_ENFORCE_PREFIX = "enforceprefix"; - private static final String LAYERS_TAG = "layers"; - private static final String LAYER_TAG = "layer"; - private static final String LAYER_ID = "id"; - private static final String LAYER_TYPE = "type"; - private static final String LAYER_COST = "cost"; - private static final String LAYER_PREFIX = "prefix"; - private static final String LAYER_DEFAULT_NAME = "default"; - - private static final int LAYOUT_VERSION = 1; - private volatile static NodeSchemaLoader instance = null; - private NodeSchemaLoader() {} - - public static NodeSchemaLoader getInstance() { - if (instance == null) { - instance = new NodeSchemaLoader(); - } - return instance; - } - - /** - * Class to house keep the result of parsing a network topology schema file. - */ - public static class NodeSchemaLoadResult { - private List schemaList; - private boolean enforcePrefix; - - NodeSchemaLoadResult(List schemaList, boolean enforcePrefix) { - this.schemaList = schemaList; - this.enforcePrefix = enforcePrefix; - } - - public boolean isEnforePrefix() { - return enforcePrefix; - } - - public List getSchemaList() { - return schemaList; - } - } - - /** - * Load user defined network layer schemas from a XML/YAML configuration file. 
- * @param schemaFilePath path of schema file - * @return all valid node schemas defined in schema file - */ - public NodeSchemaLoadResult loadSchemaFromFile(String schemaFilePath) - throws IllegalArgumentException, FileNotFoundException { - try { - File schemaFile = new File(schemaFilePath); - - if (schemaFile.exists()) { - LOG.info("Load network topology schema file " + - schemaFile.getAbsolutePath()); - try (FileInputStream inputStream = new FileInputStream(schemaFile)) { - return loadSchemaFromStream(schemaFilePath, inputStream); - } - } else { - // try to load with classloader - ClassLoader classloader = - Thread.currentThread().getContextClassLoader(); - if (classloader == null) { - classloader = NodeSchemaLoader.class.getClassLoader(); - } - if (classloader != null) { - try (InputStream stream = classloader - .getResourceAsStream(schemaFilePath)) { - if (stream != null) { - LOG.info("Loading file from " + classloader - .getResources(schemaFilePath)); - return loadSchemaFromStream(schemaFilePath, stream); - } - } - } - - } - - String msg = "Network topology layer schema file " + - schemaFilePath + "[" + schemaFile.getAbsolutePath() + - "] is not found."; - LOG.warn(msg); - throw new FileNotFoundException(msg); - - } catch (FileNotFoundException e) { - throw e; - } catch (ParserConfigurationException | IOException | SAXException e) { - throw new IllegalArgumentException("Failed to load network topology node" - + " schema file: " + schemaFilePath + " , error:" + e.getMessage(), - e); - } - } - - private NodeSchemaLoadResult loadSchemaFromStream(String schemaFilePath, - InputStream stream) - throws ParserConfigurationException, SAXException, IOException { - if (FilenameUtils.getExtension(schemaFilePath).toLowerCase() - .compareTo("yaml") == 0) { - return loadSchemaFromYaml(stream); - } else { - return loadSchema(stream); - } - } - - /** - * Load network topology layer schemas from a XML configuration file. - * @param inputStream schema file as an inputStream - * @return all valid node schemas defined in schema file - * @throws ParserConfigurationException ParserConfigurationException happen - * @throws IOException no such schema file - * @throws SAXException xml file has some invalid elements - * @throws IllegalArgumentException xml file content is logically invalid - */ - private NodeSchemaLoadResult loadSchema(InputStream inputStream) throws - ParserConfigurationException, SAXException, IOException { - LOG.info("Loading network topology layer schema file"); - // Read and parse the schema file. 
- DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); - dbf.setIgnoringComments(true); - DocumentBuilder builder = dbf.newDocumentBuilder(); - Document doc = builder.parse(inputStream); - Element root = doc.getDocumentElement(); - - if (!CONFIGURATION_TAG.equals(root.getTagName())) { - throw new IllegalArgumentException("Bad network topology layer schema " + - "configuration file: top-level element not <" + CONFIGURATION_TAG + - ">"); - } - NodeSchemaLoadResult schemaList; - if (root.getElementsByTagName(LAYOUT_VERSION_TAG).getLength() == 1) { - if (loadLayoutVersion(root) == LAYOUT_VERSION) { - if (root.getElementsByTagName(LAYERS_TAG).getLength() == 1) { - Map schemas = loadLayersSection(root); - if (root.getElementsByTagName(TOPOLOGY_TAG).getLength() == 1) { - schemaList = loadTopologySection(root, schemas); - } else { - throw new IllegalArgumentException("Bad network topology layer " + - "schema configuration file: no or multiple <" + TOPOLOGY_TAG + - "> element"); - } - } else { - throw new IllegalArgumentException("Bad network topology layer schema" - + " configuration file: no or multiple <" + LAYERS_TAG + - ">element"); - } - } else { - throw new IllegalArgumentException("The parse failed because of bad " - + LAYOUT_VERSION_TAG + " value, expected:" + LAYOUT_VERSION); - } - } else { - throw new IllegalArgumentException("Bad network topology layer schema " + - "configuration file: no or multiple <" + LAYOUT_VERSION_TAG + - "> elements"); - } - return schemaList; - } - - /** - * Load network topology layer schemas from a YAML configuration file. - * @param schemaFile as inputStream - * @return all valid node schemas defined in schema file - * @throws ParserConfigurationException ParserConfigurationException happen - * @throws IOException no such schema file - * @throws SAXException xml file has some invalid elements - * @throws IllegalArgumentException xml file content is logically invalid - */ - private NodeSchemaLoadResult loadSchemaFromYaml(InputStream schemaFile) { - LOG.info("Loading network topology layer schema file {}", schemaFile); - NodeSchemaLoadResult finalSchema; - - try { - Yaml yaml = new Yaml(); - NodeSchema nodeTree; - - nodeTree = yaml.loadAs(schemaFile, NodeSchema.class); - - List schemaList = new ArrayList<>(); - if (nodeTree.getType() != LayerType.ROOT) { - throw new IllegalArgumentException("First layer is not a ROOT node." - + " schema file."); - } - schemaList.add(nodeTree); - if (nodeTree.getSublayer() != null) { - nodeTree = nodeTree.getSublayer().get(0); - } - - while (nodeTree != null) { - if (nodeTree.getType() == LayerType.LEAF_NODE - && nodeTree.getSublayer() != null) { - throw new IllegalArgumentException("Leaf node in the middle of path." - + " schema file."); - } - if (nodeTree.getType() == LayerType.ROOT) { - throw new IllegalArgumentException("Multiple root nodes are defined." - + " schema file."); - } - schemaList.add(nodeTree); - if (nodeTree.getSublayer() != null) { - nodeTree = nodeTree.getSublayer().get(0); - } else { - break; - } - } - finalSchema = new NodeSchemaLoadResult(schemaList, true); - } catch (Exception e) { - throw new IllegalArgumentException("Fail to load network topology node" - + " schema file: " + schemaFile + " , error:" - + e.getMessage(), e); - } - - return finalSchema; - } - - /** - * Load layoutVersion from root element in the XML configuration file. 
- * @param root root element - * @return layout version - */ - private int loadLayoutVersion(Element root) { - int layoutVersion; - Text text = (Text) root.getElementsByTagName(LAYOUT_VERSION_TAG) - .item(0).getFirstChild(); - if (text != null) { - String value = text.getData().trim(); - try { - layoutVersion = Integer.parseInt(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("Bad " + LAYOUT_VERSION_TAG + - " value " + value + " is found. It should be an integer."); - } - } else { - throw new IllegalArgumentException("Value of <" + LAYOUT_VERSION_TAG + - "> is null"); - } - return layoutVersion; - } - - /** - * Load layers from root element in the XML configuration file. - * @param root root element - * @return A map of node schemas with layer ID and layer schema - */ - private Map loadLayersSection(Element root) { - NodeList elements = root.getElementsByTagName(LAYER_TAG); - Map schemas = new HashMap(); - for (int i = 0; i < elements.getLength(); i++) { - Node node = elements.item(i); - if (node instanceof Element) { - Element element = (Element) node; - if (LAYER_TAG.equals(element.getTagName())) { - String layerId = element.getAttribute(LAYER_ID); - NodeSchema schema = parseLayerElement(element); - if (!schemas.containsValue(schema)) { - schemas.put(layerId, schema); - } else { - throw new IllegalArgumentException("Repetitive layer in network " + - "topology node schema configuration file: " + layerId); - } - } else { - throw new IllegalArgumentException("Bad element in network topology " - + "node schema configuration file: " + element.getTagName()); - } - } - } - - // Integrity check, only one ROOT and one LEAF is allowed - boolean foundRoot = false; - boolean foundLeaf = false; - for(NodeSchema schema: schemas.values()) { - if (schema.getType() == LayerType.ROOT) { - if (foundRoot) { - throw new IllegalArgumentException("Multiple ROOT layers are found" + - " in network topology schema configuration file"); - } else { - foundRoot = true; - } - } - if (schema.getType() == LayerType.LEAF_NODE) { - if (foundLeaf) { - throw new IllegalArgumentException("Multiple LEAF layers are found" + - " in network topology schema configuration file"); - } else { - foundLeaf = true; - } - } - } - if (!foundRoot) { - throw new IllegalArgumentException("No ROOT layer is found" + - " in network topology schema configuration file"); - } - if (!foundLeaf) { - throw new IllegalArgumentException("No LEAF layer is found" + - " in network topology schema configuration file"); - } - return schemas; - } - - /** - * Load network topology from root element in the XML configuration file and - * sort node schemas according to the topology path. - * @param root root element - * @param schemas schema map - * @return all valid node schemas defined in schema file - */ - private NodeSchemaLoadResult loadTopologySection(Element root, - Map schemas) { - NodeList elements = root.getElementsByTagName(TOPOLOGY_TAG) - .item(0).getChildNodes(); - List schemaList = new ArrayList(); - boolean enforecePrefix = false; - for (int i = 0; i < elements.getLength(); i++) { - Node node = elements.item(i); - if (node instanceof Element) { - Element element = (Element) node; - String tagName = element.getTagName(); - // Get the nonnull text value. 
- Text text = (Text) element.getFirstChild(); - String value; - if (text != null) { - value = text.getData().trim(); - if (value.isEmpty()) { - // Element with empty value is ignored - continue; - } - } else { - throw new IllegalArgumentException("Value of <" + tagName - + "> is null"); - } - if (TOPOLOGY_PATH.equals(tagName)) { - if(value.startsWith(NetConstants.PATH_SEPARATOR_STR)) { - value = value.substring(1, value.length()); - } - String[] layerIDs = value.split(NetConstants.PATH_SEPARATOR_STR); - if (layerIDs == null || layerIDs.length != schemas.size()) { - throw new IllegalArgumentException("Topology path depth doesn't " - + "match layer element numbers"); - } - for (int j = 0; j < layerIDs.length; j++) { - if (schemas.get(layerIDs[j]) == null) { - throw new IllegalArgumentException("No layer found for id " + - layerIDs[j]); - } - } - if (schemas.get(layerIDs[0]).getType() != LayerType.ROOT) { - throw new IllegalArgumentException("Topology path doesn't start " - + "with ROOT layer"); - } - if (schemas.get(layerIDs[layerIDs.length -1]).getType() != - LayerType.LEAF_NODE) { - throw new IllegalArgumentException("Topology path doesn't end " - + "with LEAF layer"); - } - for (int j = 0; j < layerIDs.length; j++) { - schemaList.add(schemas.get(layerIDs[j])); - } - } else if (TOPOLOGY_ENFORCE_PREFIX.equalsIgnoreCase(tagName)) { - enforecePrefix = Boolean.parseBoolean(value); - } else { - throw new IllegalArgumentException("Unsupported Element <" + - tagName + ">"); - } - } - } - // Integrity check - if (enforecePrefix) { - // Every InnerNode should have prefix defined - for (NodeSchema schema: schemas.values()) { - if (schema.getType() == LayerType.INNER_NODE && - schema.getPrefix() == null) { - throw new IllegalArgumentException("There is layer without prefix " + - "defined while prefix is enforced."); - } - } - } - return new NodeSchemaLoadResult(schemaList, enforecePrefix); - } - - /** - * Load a layer from a layer element in the XML configuration file. - * @param element network topology node layer element - * @return ECSchema - */ - private NodeSchema parseLayerElement(Element element) { - NodeList fields = element.getChildNodes(); - LayerType type = null; - int cost = 0; - String prefix = null; - String defaultName = null; - for (int i = 0; i < fields.getLength(); i++) { - Node fieldNode = fields.item(i); - if (fieldNode instanceof Element) { - Element field = (Element) fieldNode; - String tagName = field.getTagName(); - // Get the nonnull text value. 
- Text text = (Text) field.getFirstChild(); - String value; - if (text != null) { - value = text.getData().trim(); - if (value.isEmpty()) { - // Element with empty value is ignored - continue; - } - } else { - continue; - } - if (LAYER_COST.equalsIgnoreCase(tagName)) { - cost = Integer.parseInt(value); - if (cost < 0) { - throw new IllegalArgumentException( - "Cost should be positive number or 0"); - } - } else if (LAYER_TYPE.equalsIgnoreCase(tagName)) { - type = NodeSchema.LayerType.getType(value); - if (type == null) { - throw new IllegalArgumentException( - "Unsupported layer type:" + value); - } - } else if (LAYER_PREFIX.equalsIgnoreCase(tagName)) { - prefix = value; - } else if (LAYER_DEFAULT_NAME.equalsIgnoreCase(tagName)) { - defaultName = value; - } else { - throw new IllegalArgumentException("Unsupported Element <" + tagName - + ">"); - } - } - } - // type is a mandatory property - if (type == null) { - throw new IllegalArgumentException("Missing type Element"); - } - return new NodeSchema(type, cost, prefix, defaultName); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java deleted file mode 100644 index c60c2c80aa9e7..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.net.NodeSchemaLoader.NodeSchemaLoadResult; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.conf.Configuration; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** The class manages all network topology schemas. 
*/ - -public final class NodeSchemaManager { - private static final Logger LOG = LoggerFactory.getLogger( - NodeSchemaManager.class); - - // All schema saved and sorted from ROOT to LEAF node - private List allSchema; - // enforcePrefix only applies to INNER_NODE - private boolean enforcePrefix; - // max level, includes ROOT level - private int maxLevel = -1; - - private volatile static NodeSchemaManager instance = null; - - private NodeSchemaManager() { - } - - public static NodeSchemaManager getInstance() { - if (instance == null) { - instance = new NodeSchemaManager(); - } - return instance; - } - - public void init(Configuration conf) { - /** - * Load schemas from network topology schema configuration file - */ - String schemaFile = conf.get( - ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, - ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT); - NodeSchemaLoadResult result; - try { - result = NodeSchemaLoader.getInstance().loadSchemaFromFile(schemaFile); - allSchema = result.getSchemaList(); - enforcePrefix = result.isEnforePrefix(); - maxLevel = allSchema.size(); - } catch (Throwable e) { - String msg = "Failed to load schema file:" + schemaFile - + ", error: " + e.getMessage(); - LOG.error(msg, e); - throw new RuntimeException(msg, e); - } - } - - @VisibleForTesting - public void init(NodeSchema[] schemas, boolean enforce) { - allSchema = new ArrayList<>(); - allSchema.addAll(Arrays.asList(schemas)); - enforcePrefix = enforce; - maxLevel = schemas.length; - } - - public int getMaxLevel() { - return maxLevel; - } - - public int getCost(int level) { - Preconditions.checkArgument(level <= maxLevel && - level >= (NetConstants.ROOT_LEVEL)); - return allSchema.get(level - NetConstants.ROOT_LEVEL).getCost(); - } - - /** - * Given a incomplete network path, return its complete network path if - * possible. E.g. input is 'node1', output is '/rack-default/node1' if this - * schema manages ROOT, RACK and LEAF, with prefix defined and enforce prefix - * enabled. - * - * @param path the incomplete input path - * @return complete path, null if cannot carry out complete action or action - * failed - */ - public String complete(String path) { - if (!enforcePrefix) { - return null; - } - String normalizedPath = NetUtils.normalize(path); - String[] subPath = normalizedPath.split(NetConstants.PATH_SEPARATOR_STR); - if ((subPath.length) == maxLevel) { - return path; - } - StringBuffer newPath = new StringBuffer(NetConstants.ROOT); - // skip the ROOT and LEAF layer - int i, j; - for (i = 1, j = 1; i < subPath.length && j < (allSchema.size() - 1);) { - if (allSchema.get(j).matchPrefix(subPath[i])) { - newPath.append(NetConstants.PATH_SEPARATOR_STR + subPath[i]); - i++; - j++; - } else { - newPath.append(allSchema.get(j).getDefaultName()); - j++; - } - } - if (i == (subPath.length - 1)) { - newPath.append(NetConstants.PATH_SEPARATOR_STR + subPath[i]); - return newPath.toString(); - } - return null; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java deleted file mode 100644 index 375af7f0ea0c1..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; -/** - The network topology supported by Ozone. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index 3c544db3ab933..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -/** - * This package contains classes for the client of the storage container - * protocol. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java deleted file mode 100644 index 2828f6ea41ca0..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * Represents a group of datanodes which store a container. - */ -public final class Pipeline { - - private static final Logger LOG = LoggerFactory.getLogger(Pipeline.class); - private final PipelineID id; - private final ReplicationType type; - private final ReplicationFactor factor; - - private PipelineState state; - private Map nodeStatus; - // nodes with ordered distance to client - private ThreadLocal> nodesInOrder = new ThreadLocal<>(); - - /** - * The immutable properties of pipeline object is used in - * ContainerStateManager#getMatchingContainerByPipeline to take a lock on - * the container allocations for a particular pipeline. - */ - private Pipeline(PipelineID id, ReplicationType type, - ReplicationFactor factor, PipelineState state, - Map nodeStatus) { - this.id = id; - this.type = type; - this.factor = factor; - this.state = state; - this.nodeStatus = nodeStatus; - } - - /** - * Returns the ID of this pipeline. - * - * @return PipelineID - */ - public PipelineID getId() { - return id; - } - - /** - * Returns the type. - * - * @return type - Simple or Ratis. - */ - public ReplicationType getType() { - return type; - } - - /** - * Returns the factor. - * - * @return type - Simple or Ratis. - */ - public ReplicationFactor getFactor() { - return factor; - } - - /** - * Returns the State of the pipeline. - * - * @return - LifeCycleStates. - */ - public PipelineState getPipelineState() { - return state; - } - - /** - * Returns the list of nodes which form this pipeline. 
- * - * @return List of DatanodeDetails - */ - public List getNodes() { - return new ArrayList<>(nodeStatus.keySet()); - } - - public DatanodeDetails getFirstNode() throws IOException { - if (nodeStatus.isEmpty()) { - throw new IOException(String.format("Pipeline=%s is empty", id)); - } - return nodeStatus.keySet().iterator().next(); - } - - public DatanodeDetails getClosestNode() throws IOException { - if (nodesInOrder.get() == null || nodesInOrder.get().isEmpty()) { - LOG.debug("Nodes in order is empty, delegate to getFirstNode"); - return getFirstNode(); - } - return nodesInOrder.get().get(0); - } - - public boolean isClosed() { - return state == PipelineState.CLOSED; - } - - public boolean isOpen() { - return state == PipelineState.OPEN; - } - - public void setNodesInOrder(List nodes) { - nodesInOrder.set(nodes); - } - - public List getNodesInOrder() { - if (nodesInOrder.get() == null || nodesInOrder.get().isEmpty()) { - LOG.debug("Nodes in order is empty, delegate to getNodes"); - return getNodes(); - } - return nodesInOrder.get(); - } - - void reportDatanode(DatanodeDetails dn) throws IOException { - if (nodeStatus.get(dn) == null) { - throw new IOException( - String.format("Datanode=%s not part of pipeline=%s", dn, id)); - } - nodeStatus.put(dn, System.currentTimeMillis()); - } - - boolean isHealthy() { - for (Long reportedTime : nodeStatus.values()) { - if (reportedTime < 0) { - return false; - } - } - return true; - } - - public boolean isEmpty() { - return nodeStatus.isEmpty(); - } - - public HddsProtos.Pipeline getProtobufMessage() - throws UnknownPipelineStateException { - HddsProtos.Pipeline.Builder builder = HddsProtos.Pipeline.newBuilder() - .setId(id.getProtobuf()) - .setType(type) - .setFactor(factor) - .setState(PipelineState.getProtobuf(state)) - .setLeaderID("") - .addAllMembers(nodeStatus.keySet().stream() - .map(DatanodeDetails::getProtoBufMessage) - .collect(Collectors.toList())); - // To save the message size on wire, only transfer the node order based on - // network topology - List nodes = nodesInOrder.get(); - if (nodes != null && !nodes.isEmpty()) { - for (int i = 0; i < nodes.size(); i++) { - Iterator it = nodeStatus.keySet().iterator(); - for (int j = 0; j < nodeStatus.keySet().size(); j++) { - if (it.next().equals(nodes.get(i))) { - builder.addMemberOrders(j); - break; - } - } - } - if (LOG.isDebugEnabled()) { - LOG.debug("Serialize pipeline {} with nodesInOrder{ }", id.toString(), - nodes); - } - } - return builder.build(); - } - - public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) - throws UnknownPipelineStateException { - Preconditions.checkNotNull(pipeline, "Pipeline is null"); - return new Builder().setId(PipelineID.getFromProtobuf(pipeline.getId())) - .setFactor(pipeline.getFactor()) - .setType(pipeline.getType()) - .setState(PipelineState.fromProtobuf(pipeline.getState())) - .setNodes(pipeline.getMembersList().stream() - .map(DatanodeDetails::getFromProtoBuf).collect(Collectors.toList())) - .setNodesInOrder(pipeline.getMemberOrdersList()) - .build(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - Pipeline that = (Pipeline) o; - - return new EqualsBuilder() - .append(id, that.id) - .append(type, that.type) - .append(factor, that.factor) - .append(getNodes(), that.getNodes()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder() - .append(id) - .append(type) - .append(factor) - 
.append(nodeStatus) - .toHashCode(); - } - - @Override - public String toString() { - final StringBuilder b = - new StringBuilder(getClass().getSimpleName()).append("["); - b.append(" Id: ").append(id.getId()); - b.append(", Nodes: "); - nodeStatus.keySet().forEach(b::append); - b.append(", Type:").append(getType()); - b.append(", Factor:").append(getFactor()); - b.append(", State:").append(getPipelineState()); - b.append("]"); - return b.toString(); - } - - public static Builder newBuilder() { - return new Builder(); - } - - public static Builder newBuilder(Pipeline pipeline) { - return new Builder(pipeline); - } - - /** - * Builder class for Pipeline. - */ - public static class Builder { - private PipelineID id = null; - private ReplicationType type = null; - private ReplicationFactor factor = null; - private PipelineState state = null; - private Map nodeStatus = null; - private List nodeOrder = null; - private List nodesInOrder = null; - - public Builder() {} - - public Builder(Pipeline pipeline) { - this.id = pipeline.id; - this.type = pipeline.type; - this.factor = pipeline.factor; - this.state = pipeline.state; - this.nodeStatus = pipeline.nodeStatus; - this.nodesInOrder = pipeline.nodesInOrder.get(); - } - - public Builder setId(PipelineID id1) { - this.id = id1; - return this; - } - - public Builder setType(ReplicationType type1) { - this.type = type1; - return this; - } - - public Builder setFactor(ReplicationFactor factor1) { - this.factor = factor1; - return this; - } - - public Builder setState(PipelineState state1) { - this.state = state1; - return this; - } - - public Builder setNodes(List nodes) { - this.nodeStatus = new LinkedHashMap<>(); - nodes.forEach(node -> nodeStatus.put(node, -1L)); - return this; - } - - public Builder setNodesInOrder(List orders) { - this.nodeOrder = orders; - return this; - } - - public Pipeline build() { - Preconditions.checkNotNull(id); - Preconditions.checkNotNull(type); - Preconditions.checkNotNull(factor); - Preconditions.checkNotNull(state); - Preconditions.checkNotNull(nodeStatus); - Pipeline pipeline = new Pipeline(id, type, factor, state, nodeStatus); - - if (nodeOrder != null && !nodeOrder.isEmpty()) { - // This branch is for build from ProtoBuf - List nodesWithOrder = new ArrayList<>(); - for(int i = 0; i < nodeOrder.size(); i++) { - int nodeIndex = nodeOrder.get(i); - Iterator it = nodeStatus.keySet().iterator(); - while(it.hasNext() && nodeIndex >= 0) { - DatanodeDetails node = it.next(); - if (nodeIndex == 0) { - nodesWithOrder.add(node); - break; - } - nodeIndex--; - } - } - if (LOG.isDebugEnabled()) { - LOG.debug("Deserialize nodesInOrder {} in pipeline {}", - nodesWithOrder, id.toString()); - } - pipeline.setNodesInOrder(nodesWithOrder); - } else if (nodesInOrder != null){ - // This branch is for pipeline clone - pipeline.setNodesInOrder(nodesInOrder); - } - return pipeline; - } - } - - /** - * Possible Pipeline states in SCM. 
- */ - public enum PipelineState { - ALLOCATED, OPEN, DORMANT, CLOSED; - - public static PipelineState fromProtobuf(HddsProtos.PipelineState state) - throws UnknownPipelineStateException { - Preconditions.checkNotNull(state, "Pipeline state is null"); - switch (state) { - case PIPELINE_ALLOCATED: return ALLOCATED; - case PIPELINE_OPEN: return OPEN; - case PIPELINE_DORMANT: return DORMANT; - case PIPELINE_CLOSED: return CLOSED; - default: - throw new UnknownPipelineStateException( - "Pipeline state: " + state + " is not recognized."); - } - } - - public static HddsProtos.PipelineState getProtobuf(PipelineState state) - throws UnknownPipelineStateException { - Preconditions.checkNotNull(state, "Pipeline state is null"); - switch (state) { - case ALLOCATED: return HddsProtos.PipelineState.PIPELINE_ALLOCATED; - case OPEN: return HddsProtos.PipelineState.PIPELINE_OPEN; - case DORMANT: return HddsProtos.PipelineState.PIPELINE_DORMANT; - case CLOSED: return HddsProtos.PipelineState.PIPELINE_CLOSED; - default: - throw new UnknownPipelineStateException( - "Pipeline state: " + state + " is not recognized."); - } - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java deleted file mode 100644 index 76cf55e8b124e..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -import java.util.UUID; - -/** - * ID for the pipeline, the ID is based on UUID. 
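The Pipeline class removed above couples an immutable identity (id, type, factor) with mutable state and a per-thread, topology-sorted node list that is serialized as member indices rather than duplicated DatanodeDetails. A minimal usage sketch, assuming the HDDS classes deleted in this diff are still on the classpath and that dn1..dn3 are pre-built DatanodeDetails instances (their construction is elided):

    static Pipeline buildSample(DatanodeDetails dn1, DatanodeDetails dn2,
        DatanodeDetails dn3) throws UnknownPipelineStateException {
      Pipeline pipeline = Pipeline.newBuilder()
          .setId(PipelineID.randomId())
          .setType(HddsProtos.ReplicationType.RATIS)
          .setFactor(HddsProtos.ReplicationFactor.THREE)
          .setState(Pipeline.PipelineState.OPEN)
          .setNodes(java.util.Arrays.asList(dn1, dn2, dn3))
          .build();
      // The client-sorted order lives in a ThreadLocal and goes on the wire
      // only as memberOrders indices into the member list.
      pipeline.setNodesInOrder(java.util.Arrays.asList(dn2, dn1, dn3));
      HddsProtos.Pipeline proto = pipeline.getProtobufMessage();
      return Pipeline.getFromProtobuf(proto); // order restored from memberOrders
    }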
- */ -public final class PipelineID { - - private UUID id; - - private PipelineID(UUID id) { - this.id = id; - } - - public static PipelineID randomId() { - return new PipelineID(UUID.randomUUID()); - } - - public static PipelineID valueOf(UUID id) { - return new PipelineID(id); - } - - public UUID getId() { - return id; - } - - public HddsProtos.PipelineID getProtobuf() { - return HddsProtos.PipelineID.newBuilder().setId(id.toString()).build(); - } - - public static PipelineID getFromProtobuf(HddsProtos.PipelineID protos) { - return new PipelineID(UUID.fromString(protos.getId())); - } - - @Override - public String toString() { - return "PipelineID=" + id; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - PipelineID that = (PipelineID) o; - - return id.equals(that.id); - } - - @Override - public int hashCode() { - return id.hashCode(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java deleted file mode 100644 index 2a89aab5288d0..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import java.io.IOException; - -/** - * Signals that a pipeline is missing from PipelineManager. - */ -public class PipelineNotFoundException extends IOException{ - /** - * Constructs an {@code PipelineNotFoundException} with {@code null} - * as its error detail message. - */ - public PipelineNotFoundException() { - super(); - } - - /** - * Constructs an {@code PipelineNotFoundException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public PipelineNotFoundException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java deleted file mode 100644 index 7c75fc0a13974..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import java.io.IOException; - -/** - * Signals that a pipeline state is not recognized. - */ -public class UnknownPipelineStateException extends IOException { - /** - * Constructs an {@code UnknownPipelineStateException} with {@code null} - * as its error detail message. - */ - public UnknownPipelineStateException() { - super(); - } - - /** - * Constructs an {@code UnknownPipelineStateException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public UnknownPipelineStateException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java deleted file mode 100644 index 51adc88866123..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.pipeline; -/** - Ozone supports the notion of different kind of pipelines. - That means that we can have a replication pipeline build on - Ratis, Simple or some other protocol. All Pipeline managers - the entities in charge of pipelines reside in the package. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java deleted file mode 100644 index 10a9b1b5de3a0..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
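UnknownPipelineStateException, removed just above, is the checked failure mode for the PipelineState proto conversions shown earlier. A short illustrative sketch of how a caller guards against an unmapped proto value:

    try {
      Pipeline.PipelineState state =
          Pipeline.PipelineState.fromProtobuf(HddsProtos.PipelineState.PIPELINE_OPEN);
      // use state ...
    } catch (UnknownPipelineStateException e) {
      // raised for proto values that have no SCM-side mapping
    }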
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.protocol; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; - -import java.util.Set; - -/** - * Holds the nodes that currently host the container for an object key hash. - */ -@InterfaceAudience.Private -public final class LocatedContainer { - private final String key; - private final String matchedKeyPrefix; - private final String containerName; - private final Set locations; - private final DatanodeInfo leader; - - /** - * Creates a LocatedContainer. - * - * @param key object key - * @param matchedKeyPrefix prefix of key that was used to find the location - * @param containerName container name - * @param locations nodes that currently host the container - * @param leader node that currently acts as pipeline leader - */ - public LocatedContainer(String key, String matchedKeyPrefix, - String containerName, Set locations, DatanodeInfo leader) { - this.key = key; - this.matchedKeyPrefix = matchedKeyPrefix; - this.containerName = containerName; - this.locations = locations; - this.leader = leader; - } - - /** - * Returns the container name. - * - * @return container name - */ - public String getContainerName() { - return this.containerName; - } - - /** - * Returns the object key. - * - * @return object key - */ - public String getKey() { - return this.key; - } - - /** - * Returns the node that currently acts as pipeline leader. - * - * @return node that currently acts as pipeline leader - */ - public DatanodeInfo getLeader() { - return this.leader; - } - - /** - * Returns the nodes that currently host the container. - * - * @return {@code Set} nodes that currently host the container - */ - public Set getLocations() { - return this.locations; - } - - /** - * Returns the prefix of the key that was used to find the location. - * - * @return prefix of the key that was used to find the location - */ - public String getMatchedKeyPrefix() { - return this.matchedKeyPrefix; - } - - @Override - public boolean equals(Object otherObj) { - if (otherObj == null) { - return false; - } - if (!(otherObj instanceof LocatedContainer)) { - return false; - } - LocatedContainer other = (LocatedContainer)otherObj; - return this.key == null ? 
other.key == null : this.key.equals(other.key); - } - - @Override - public int hashCode() { - return key.hashCode(); - } - - @Override - public String toString() { - return getClass().getSimpleName() - + "{key=" + key - + "; matchedKeyPrefix=" + matchedKeyPrefix - + "; containerName=" + containerName - + "; locations=" + locations - + "; leader=" + leader - + "}"; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java deleted file mode 100644 index 18045f88cbd27..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.protocol; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.security.KerberosInfo; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes - * to read/write a block. - */ -@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -public interface ScmBlockLocationProtocol extends Closeable { - - @SuppressWarnings("checkstyle:ConstantName") - /** - * Version 1: Initial version. - */ - long versionID = 1L; - - /** - * Asks SCM where a block should be allocated. SCM responds with the - * set of datanodes that should be used creating this block. - * @param size - size of the block. - * @param numBlocks - number of blocks. - * @param type - replication type of the blocks. - * @param factor - replication factor of the blocks. - * @param excludeList List of datanodes/containers to exclude during block - * allocation. - * @return allocated block accessing info (key, pipeline). - * @throws IOException - */ - List allocateBlock(long size, int numBlocks, - ReplicationType type, ReplicationFactor factor, String owner, - ExcludeList excludeList) throws IOException; - - /** - * Delete blocks for a set of object keys. - * - * @param keyBlocksInfoList Map of object key and its blocks. - * @return list of block deletion results. 
- * @throws IOException if there is any failure. - */ - List - deleteKeyBlocks(List keyBlocksInfoList) throws IOException; - - /** - * Gets the Clusterid and SCM Id from SCM. - */ - ScmInfo getScmInfo() throws IOException; - - /** - * Sort datanodes with distance to client. - * @param nodes list of network name of each node. - * @param clientMachine client address, depends, can be hostname or ipaddress. - */ - List sortDatanodes(List nodes, - String clientMachine) throws IOException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java deleted file mode 100644 index 0d2ecf7ac16cf..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.protocol; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; - -import java.util.List; -import java.util.stream.Collectors; - -/** - * Holds the nodes that currently host the block for a block key. - */ -@InterfaceAudience.Private -public final class ScmLocatedBlock { - private final String key; - private final List locations; - private final DatanodeInfo leader; - - /** - * Creates a ScmLocatedBlock. - * - * @param key object key - * @param locations nodes that currently host the block - * @param leader node that currently acts as pipeline leader - */ - public ScmLocatedBlock(final String key, final List locations, - final DatanodeInfo leader) { - this.key = key; - this.locations = locations; - this.leader = leader; - } - - /** - * Returns the object key. - * - * @return object key - */ - public String getKey() { - return this.key; - } - - /** - * Returns the node that currently acts as pipeline leader. - * - * @return node that currently acts as pipeline leader - */ - public DatanodeInfo getLeader() { - return this.leader; - } - - /** - * Returns the nodes that currently host the block. - * - * @return {@literal List} nodes that currently host the block - */ - public List getLocations() { - return this.locations; - } - - @Override - public boolean equals(Object otherObj) { - if (otherObj == null) { - return false; - } - if (!(otherObj instanceof ScmLocatedBlock)) { - return false; - } - ScmLocatedBlock other = (ScmLocatedBlock)otherObj; - return this.key == null ? 
other.key == null : this.key.equals(other.key); - } - - @Override - public int hashCode() { - return key.hashCode(); - } - - @Override - public String toString() { - return getClass().getSimpleName() + "{key=" + key + "; locations=" - + locations.stream().map(loc -> loc.toString()).collect(Collectors - .joining(",")) + "; leader=" + leader + "}"; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java deleted file mode 100644 index 88db8205a408f..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ /dev/null @@ -1,214 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.protocol; - -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import org.apache.hadoop.security.KerberosInfo; - -/** - * ContainerLocationProtocol is used by an HDFS node to find the set of nodes - * that currently host a container. - */ -@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -public interface StorageContainerLocationProtocol extends Closeable { - - @SuppressWarnings("checkstyle:ConstantName") - /** - * Version 1: Initial version. - */ - long versionID = 1L; - - /** - * Asks SCM where a container should be allocated. SCM responds with the - * set of datanodes that should be used creating this container. - * - */ - ContainerWithPipeline allocateContainer( - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor factor, String owner) - throws IOException; - - /** - * Ask SCM the location of the container. SCM responds with a group of - * nodes where this container and its replicas are located. - * - * @param containerID - ID of the container. - * @return ContainerInfo - the container info such as where the pipeline - * is located. - * @throws IOException - */ - ContainerInfo getContainer(long containerID) throws IOException; - - /** - * Ask SCM the location of the container. SCM responds with a group of - * nodes where this container and its replicas are located. - * - * @param containerID - ID of the container. - * @return ContainerWithPipeline - the container info with the pipeline. - * @throws IOException - */ - ContainerWithPipeline getContainerWithPipeline(long containerID) - throws IOException; - - /** - * Ask SCM a list of containers with a range of container names - * and the limit of count. - * Search container names between start name(exclusive), and - * use prefix name to filter the result. the max size of the - * searching range cannot exceed the value of count. - * - * @param startContainerID start container ID. - * @param count count, if count {@literal <} 0, the max size is unlimited.( - * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big) - * - * @return a list of container. - * @throws IOException - */ - List listContainer(long startContainerID, int count) - throws IOException; - - /** - * Deletes a container in SCM. - * - * @param containerID - * @throws IOException - * if failed to delete the container mapping from db store - * or container doesn't exist. - */ - void deleteContainer(long containerID) throws IOException; - - /** - * Queries a list of Node Statuses. - * @param state - * @return List of Datanodes. 
- */ - List queryNode(HddsProtos.NodeState state, - HddsProtos.QueryScope queryScope, String poolName) throws IOException; - - /** - * Notify from client when begin or finish creating objects like pipeline - * or containers on datanodes. - * Container will be in Operational state after that. - * @param type object type - * @param id object id - * @param op operation type (e.g., create, close, delete) - * @param stage creation stage - */ - void notifyObjectStageChange( - ObjectStageChangeRequestProto.Type type, long id, - ObjectStageChangeRequestProto.Op op, - ObjectStageChangeRequestProto.Stage stage) throws IOException; - - /** - * Creates a replication pipeline of a specified type. - * @param type - replication type - * @param factor - factor 1 or 3 - * @param nodePool - optional machine list to build a pipeline. - * @throws IOException - */ - Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) - throws IOException; - - /** - * Returns the list of active Pipelines. - * - * @return list of Pipeline - * - * @throws IOException in case of any exception - */ - List listPipelines() throws IOException; - - /** - * Activates a dormant pipeline. - * - * @param pipelineID ID of the pipeline to activate. - * @throws IOException in case of any Exception - */ - void activatePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Deactivates an active pipeline. - * - * @param pipelineID ID of the pipeline to deactivate. - * @throws IOException in case of any Exception - */ - void deactivatePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Closes a pipeline given the pipelineID. - * - * @param pipelineID ID of the pipeline to demolish - * @throws IOException - */ - void closePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Returns information about SCM. - * - * @return {@link ScmInfo} - * @throws IOException - */ - ScmInfo getScmInfo() throws IOException; - - /** - * Check if SCM is in safe mode. - * - * @return Returns true if SCM is in safe mode else returns false. - * @throws IOException - */ - boolean inSafeMode() throws IOException; - - /** - * Force SCM out of Safe mode. - * - * @return returns true if operation is successful. - * @throws IOException - */ - boolean forceExitSafeMode() throws IOException; - - /** - * Start ReplicationManager. - */ - void startReplicationManager() throws IOException; - - /** - * Stop ReplicationManager. - */ - void stopReplicationManager() throws IOException; - - /** - * Returns ReplicationManager status. - * - * @return True if ReplicationManager is running, false otherwise. - */ - boolean getReplicationManagerStatus() throws IOException; - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java deleted file mode 100644 index b56a749453f14..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
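The listContainer contract in the interface above is a paged scan: the caller passes a start container ID (treated as exclusive per the javadoc) and a batch size, and keeps going while full pages come back. A minimal paging sketch, assuming scm is an already constructed StorageContainerLocationProtocol client and that ContainerInfo#getContainerID is used as the paging key (both assumptions, not shown in this diff):

    long start = 0;          // resume point for the next page (exclusive)
    final int batch = 100;   // page size passed as "count"
    List<ContainerInfo> page;
    do {
      page = scm.listContainer(start, batch);
      for (ContainerInfo info : page) {
        start = Math.max(start, info.getContainerID());
        // process info ...
      }
    } while (page.size() == batch);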
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.protocol; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java deleted file mode 100644 index a262bb5bdbdd9..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ /dev/null @@ -1,273 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.protocolPB; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Type; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateBlockResponse; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .SortDatanodesRequestProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .SortDatanodesResponseProto; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; - -import com.google.common.base.Preconditions; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; - -import static org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Status.OK; - -/** - * This class is the client-side translator to translate the requests made on - * the {@link ScmBlockLocationProtocol} interface to the RPC server - * implementing {@link ScmBlockLocationProtocolPB}. - */ -@InterfaceAudience.Private -public final class ScmBlockLocationProtocolClientSideTranslatorPB - implements ScmBlockLocationProtocol, ProtocolTranslator, Closeable { - - /** - * RpcController is not used and hence is set to null. - */ - private static final RpcController NULL_RPC_CONTROLLER = null; - - private final ScmBlockLocationProtocolPB rpcProxy; - - /** - * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB. 
- * - * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy - */ - public ScmBlockLocationProtocolClientSideTranslatorPB( - ScmBlockLocationProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; - } - - /** - * Returns a SCMBlockLocationRequest builder with specified type. - * @param cmdType type of the request - */ - private SCMBlockLocationRequest.Builder createSCMBlockRequest(Type cmdType) { - return SCMBlockLocationRequest.newBuilder() - .setCmdType(cmdType) - .setTraceID(TracingUtil.exportCurrentSpan()); - } - - /** - * Submits client request to SCM server. - * @param req client request - * @return response from SCM - * @throws IOException thrown if any Protobuf service exception occurs - */ - private SCMBlockLocationResponse submitRequest( - SCMBlockLocationRequest req) throws IOException { - try { - SCMBlockLocationResponse response = - rpcProxy.send(NULL_RPC_CONTROLLER, req); - return response; - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - } - - private SCMBlockLocationResponse handleError(SCMBlockLocationResponse resp) - throws SCMException { - if (resp.getStatus() != OK) { - throw new SCMException(resp.getMessage(), - SCMException.ResultCodes.values()[resp.getStatus().ordinal()]); - } - return resp; - } - - /** - * Asks SCM where a block should be allocated. SCM responds with the - * set of datanodes that should be used creating this block. - * @param size - size of the block. - * @param num - number of blocks. - * @param type - replication type of the blocks. - * @param factor - replication factor of the blocks. - * @param excludeList - exclude list while allocating blocks. - * @return allocated block accessing info (key, pipeline). - * @throws IOException - */ - @Override - public List allocateBlock(long size, int num, - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, - String owner, ExcludeList excludeList) throws IOException { - Preconditions.checkArgument(size > 0, "block size must be greater than 0"); - - AllocateScmBlockRequestProto request = - AllocateScmBlockRequestProto.newBuilder() - .setSize(size) - .setNumBlocks(num) - .setType(type) - .setFactor(factor) - .setOwner(owner) - .setExcludeList(excludeList.getProtoBuf()) - .build(); - - SCMBlockLocationRequest wrapper = createSCMBlockRequest( - Type.AllocateScmBlock) - .setAllocateScmBlockRequest(request) - .build(); - - final SCMBlockLocationResponse wrappedResponse = - handleError(submitRequest(wrapper)); - final AllocateScmBlockResponseProto response = - wrappedResponse.getAllocateScmBlockResponse(); - - List blocks = new ArrayList<>(response.getBlocksCount()); - for (AllocateBlockResponse resp : response.getBlocksList()) { - AllocatedBlock.Builder builder = new AllocatedBlock.Builder() - .setContainerBlockID( - ContainerBlockID.getFromProtobuf(resp.getContainerBlockID())) - .setPipeline(Pipeline.getFromProtobuf(resp.getPipeline())); - blocks.add(builder.build()); - } - - return blocks; - } - - /** - * Delete the set of keys specified. - * - * @param keyBlocksInfoList batch of block keys to delete. - * @return list of block deletion results. - * @throws IOException if there is any failure. 
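Every call in this translator follows the shape shown above: build the typed sub-request, wrap it in an SCMBlockLocationRequest carrying the exported trace span, send it through the proxy, and map any non-OK status to an SCMException. A hedged caller-side sketch of allocateBlock, assuming scmBlockClient is an already wired ScmBlockLocationProtocolClientSideTranslatorPB, excludeList is a (possibly empty) ExcludeList, and AllocatedBlock exposes the pipeline it was built with:

    // Ask SCM for one 256 MB RATIS/THREE block on behalf of owner "ozone".
    List<AllocatedBlock> blocks = scmBlockClient.allocateBlock(
        256L * 1024 * 1024, 1,
        HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE,
        "ozone", excludeList);
    Pipeline pipeline = blocks.get(0).getPipeline();  // where to write the block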
- * - */ - @Override - public List deleteKeyBlocks( - List keyBlocksInfoList) throws IOException { - List keyBlocksProto = keyBlocksInfoList.stream() - .map(BlockGroup::getProto).collect(Collectors.toList()); - DeleteScmKeyBlocksRequestProto request = DeleteScmKeyBlocksRequestProto - .newBuilder() - .addAllKeyBlocks(keyBlocksProto) - .build(); - - SCMBlockLocationRequest wrapper = createSCMBlockRequest( - Type.DeleteScmKeyBlocks) - .setDeleteScmKeyBlocksRequest(request) - .build(); - - final SCMBlockLocationResponse wrappedResponse = - handleError(submitRequest(wrapper)); - final DeleteScmKeyBlocksResponseProto resp = - wrappedResponse.getDeleteScmKeyBlocksResponse(); - - List results = - new ArrayList<>(resp.getResultsCount()); - results.addAll(resp.getResultsList().stream().map( - result -> new DeleteBlockGroupResult(result.getObjectKey(), - DeleteBlockGroupResult - .convertBlockResultProto(result.getBlockResultsList()))) - .collect(Collectors.toList())); - return results; - } - - /** - * Gets the cluster Id and Scm Id from SCM. - * @return ScmInfo - * @throws IOException - */ - @Override - public ScmInfo getScmInfo() throws IOException { - HddsProtos.GetScmInfoRequestProto request = - HddsProtos.GetScmInfoRequestProto.getDefaultInstance(); - HddsProtos.GetScmInfoResponseProto resp; - - SCMBlockLocationRequest wrapper = createSCMBlockRequest( - Type.GetScmInfo) - .setGetScmInfoRequest(request) - .build(); - - final SCMBlockLocationResponse wrappedResponse = - handleError(submitRequest(wrapper)); - resp = wrappedResponse.getGetScmInfoResponse(); - ScmInfo.Builder builder = new ScmInfo.Builder() - .setClusterId(resp.getClusterId()) - .setScmId(resp.getScmId()); - return builder.build(); - } - - /** - * Sort the datanodes based on distance from client. - * @return List - * @throws IOException - */ - @Override - public List sortDatanodes(List nodes, - String clientMachine) throws IOException { - SortDatanodesRequestProto request = SortDatanodesRequestProto - .newBuilder() - .addAllNodeNetworkName(nodes) - .setClient(clientMachine) - .build(); - SCMBlockLocationRequest wrapper = createSCMBlockRequest( - Type.SortDatanodes) - .setSortDatanodesRequest(request) - .build(); - - final SCMBlockLocationResponse wrappedResponse = - handleError(submitRequest(wrapper)); - SortDatanodesResponseProto resp = - wrappedResponse.getSortDatanodesResponse(); - List results = new ArrayList<>(resp.getNodeCount()); - results.addAll(resp.getNodeList().stream() - .map(node -> DatanodeDetails.getFromProtoBuf(node)) - .collect(Collectors.toList())); - return results; - } - - @Override - public Object getUnderlyingProxyObject() { - return rpcProxy; - } - - @Override - public void close() { - RPC.stopProxy(rpcProxy); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java deleted file mode 100644 index 1ba698bf0e30a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.protocolPB; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .ScmBlockLocationProtocolService; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.security.KerberosInfo; - -/** - * Protocol used from an HDFS node to StorageContainerManager. This extends the - * Protocol Buffers service interface to add Hadoop-specific annotations. - */ -@ProtocolInfo(protocolName = - "org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol", - protocolVersion = 1) -@InterfaceAudience.Private -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -public interface ScmBlockLocationProtocolPB - extends ScmBlockLocationProtocolService.BlockingInterface { -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java deleted file mode 100644 index 01db597dfae1a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ /dev/null @@ -1,475 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.protocolPB; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Consumer; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest.Builder; 
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ipc.RPC; - -import com.google.common.base.Preconditions; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; - -/** - * This class is the client-side translator to translate the requests made on - * the {@link StorageContainerLocationProtocol} interface to the RPC server - * implementing {@link StorageContainerLocationProtocolPB}. - */ -@InterfaceAudience.Private -public final class StorageContainerLocationProtocolClientSideTranslatorPB - implements StorageContainerLocationProtocol, ProtocolTranslator, Closeable { - - /** - * RpcController is not used and hence is set to null. - */ - private static final RpcController NULL_RPC_CONTROLLER = null; - - private final StorageContainerLocationProtocolPB rpcProxy; - - /** - * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB. - * - * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy - */ - public StorageContainerLocationProtocolClientSideTranslatorPB( - StorageContainerLocationProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; - } - - /** - * Helper method to wrap the request and send the message. - */ - private ScmContainerLocationResponse submitRequest( - StorageContainerLocationProtocolProtos.Type type, - Consumer builderConsumer) throws IOException { - final ScmContainerLocationResponse response; - try { - - Builder builder = ScmContainerLocationRequest.newBuilder() - .setCmdType(type) - .setTraceID(TracingUtil.exportCurrentSpan()); - builderConsumer.accept(builder); - ScmContainerLocationRequest wrapper = builder.build(); - - response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper); - } catch (ServiceException ex) { - throw ProtobufHelper.getRemoteException(ex); - } - return response; - } - - /** - * Asks SCM where a container should be allocated. SCM responds with the set - * of datanodes that should be used creating this container. Ozone/SCM only - * supports replication factor of either 1 or 3. 
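The submitRequest helper defined above centralizes tracing and error translation; each RPC only supplies its Type plus a lambda that sets the matching sub-request on the shared builder. A sketch of that pattern using the parameter-less GetScmInfo request, mirroring the getScmInfo implementation that appears further down in this class:

    ScmContainerLocationResponse response = submitRequest(
        Type.GetScmInfo,
        builder -> builder.setGetScmInfoRequest(
            HddsProtos.GetScmInfoRequestProto.getDefaultInstance()));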
- * - * @param type - Replication Type - * @param factor - Replication Count - */ - @Override - public ContainerWithPipeline allocateContainer( - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, - String owner) throws IOException { - - ContainerRequestProto request = ContainerRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setReplicationFactor(factor) - .setReplicationType(type) - .setOwner(owner) - .build(); - - ContainerResponseProto response = - submitRequest(Type.AllocateContainer, - builder -> builder.setContainerRequest(request)) - .getContainerResponse(); - //TODO should be migrated to use the top level status structure. - if (response.getErrorCode() != ContainerResponseProto.Error.success) { - throw new IOException(response.hasErrorMessage() ? - response.getErrorMessage() : "Allocate container failed."); - } - return ContainerWithPipeline.fromProtobuf( - response.getContainerWithPipeline()); - } - - public ContainerInfo getContainer(long containerID) throws IOException { - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative"); - GetContainerRequestProto request = GetContainerRequestProto - .newBuilder() - .setContainerID(containerID) - .setTraceID(TracingUtil.exportCurrentSpan()) - .build(); - ScmContainerLocationResponse response = - submitRequest(Type.GetContainer, - (builder) -> builder.setGetContainerRequest(request)); - return ContainerInfo - .fromProtobuf(response.getGetContainerResponse().getContainerInfo()); - - } - - /** - * {@inheritDoc} - */ - public ContainerWithPipeline getContainerWithPipeline(long containerID) - throws IOException { - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative"); - GetContainerWithPipelineRequestProto request = - GetContainerWithPipelineRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setContainerID(containerID).build(); - - ScmContainerLocationResponse response = - submitRequest(Type.GetContainerWithPipeline, - (builder) -> builder.setGetContainerWithPipelineRequest(request)); - - return ContainerWithPipeline.fromProtobuf( - response.getGetContainerWithPipelineResponse() - .getContainerWithPipeline()); - - } - - /** - * {@inheritDoc} - */ - @Override - public List listContainer(long startContainerID, int count) - throws IOException { - Preconditions.checkState(startContainerID >= 0, - "Container ID cannot be negative."); - Preconditions.checkState(count > 0, - "Container count must be greater than 0."); - SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto - .newBuilder(); - builder.setStartContainerID(startContainerID); - builder.setCount(count); - builder.setTraceID(TracingUtil.exportCurrentSpan()); - SCMListContainerRequestProto request = builder.build(); - - SCMListContainerResponseProto response = - submitRequest(Type.ListContainer, - builder1 -> builder1.setScmListContainerRequest(request)) - .getScmListContainerResponse(); - List containerList = new ArrayList<>(); - for (HddsProtos.ContainerInfoProto containerInfoProto : response - .getContainersList()) { - containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); - } - return containerList; - - } - - /** - * Ask SCM to delete a container by name. SCM will remove - * the container mapping in its database. 
- */ - @Override - public void deleteContainer(long containerID) - throws IOException { - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative"); - SCMDeleteContainerRequestProto request = SCMDeleteContainerRequestProto - .newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setContainerID(containerID) - .build(); - submitRequest(Type.DeleteContainer, - builder -> builder.setScmDeleteContainerRequest(request)); - - } - - /** - * Queries a list of Node Statuses. - */ - @Override - public List queryNode(HddsProtos.NodeState - nodeStatuses, HddsProtos.QueryScope queryScope, String poolName) - throws IOException { - // TODO : We support only cluster wide query right now. So ignoring checking - // queryScope and poolName - Preconditions.checkNotNull(nodeStatuses); - NodeQueryRequestProto request = NodeQueryRequestProto.newBuilder() - .setState(nodeStatuses) - .setTraceID(TracingUtil.exportCurrentSpan()) - .setScope(queryScope).setPoolName(poolName).build(); - NodeQueryResponseProto response = submitRequest(Type.QueryNode, - builder -> builder.setNodeQueryRequest(request)).getNodeQueryResponse(); - return response.getDatanodesList(); - - } - - /** - * Notify from client that creates object on datanodes. - * - * @param type object type - * @param id object id - * @param op operation type (e.g., create, close, delete) - * @param stage object creation stage : begin/complete - */ - @Override - public void notifyObjectStageChange( - ObjectStageChangeRequestProto.Type type, long id, - ObjectStageChangeRequestProto.Op op, - ObjectStageChangeRequestProto.Stage stage) throws IOException { - Preconditions.checkState(id >= 0, - "Object id cannot be negative."); - ObjectStageChangeRequestProto request = - ObjectStageChangeRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setType(type) - .setId(id) - .setOp(op) - .setStage(stage) - .build(); - submitRequest(Type.NotifyObjectStageChange, - builder -> builder.setObjectStageChangeRequest(request)); - - } - - /** - * Creates a replication pipeline of a specified type. - * - * @param replicationType - replication type - * @param factor - factor 1 or 3 - * @param nodePool - optional machine list to build a pipeline. - */ - @Override - public Pipeline createReplicationPipeline(HddsProtos.ReplicationType - replicationType, HddsProtos.ReplicationFactor factor, HddsProtos - .NodePool nodePool) throws IOException { - PipelineRequestProto request = PipelineRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setNodePool(nodePool) - .setReplicationFactor(factor) - .setReplicationType(replicationType) - .build(); - - PipelineResponseProto response = - submitRequest(Type.AllocatePipeline, - builder -> builder.setPipelineRequest(request)) - .getPipelineResponse(); - if (response.getErrorCode() == - PipelineResponseProto.Error.success) { - Preconditions.checkState(response.hasPipeline(), "With success, " + - "must come a pipeline"); - return Pipeline.getFromProtobuf(response.getPipeline()); - } else { - String errorMessage = String.format("create replication pipeline " + - "failed. code : %s Message: %s", response.getErrorCode(), - response.hasErrorMessage() ? 
response.getErrorMessage() : ""); - throw new IOException(errorMessage); - } - - } - - @Override - public List listPipelines() throws IOException { - ListPipelineRequestProto request = ListPipelineRequestProto - .newBuilder().setTraceID(TracingUtil.exportCurrentSpan()) - .build(); - - ListPipelineResponseProto response = submitRequest(Type.ListPipelines, - builder -> builder.setListPipelineRequest(request)) - .getListPipelineResponse(); - - List list = new ArrayList<>(); - for (HddsProtos.Pipeline pipeline : response.getPipelinesList()) { - Pipeline fromProtobuf = Pipeline.getFromProtobuf(pipeline); - list.add(fromProtobuf); - } - return list; - - } - - @Override - public void activatePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - ActivatePipelineRequestProto request = - ActivatePipelineRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setPipelineID(pipelineID) - .build(); - submitRequest(Type.ActivatePipeline, - builder -> builder.setActivatePipelineRequest(request)); - - } - - @Override - public void deactivatePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - - DeactivatePipelineRequestProto request = - DeactivatePipelineRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setPipelineID(pipelineID) - .build(); - submitRequest(Type.DeactivatePipeline, - builder -> builder.setDeactivatePipelineRequest(request)); - } - - @Override - public void closePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - - ClosePipelineRequestProto request = - ClosePipelineRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setPipelineID(pipelineID) - .build(); - submitRequest(Type.ClosePipeline, - builder -> builder.setClosePipelineRequest(request)); - - } - - @Override - public ScmInfo getScmInfo() throws IOException { - HddsProtos.GetScmInfoRequestProto request = - HddsProtos.GetScmInfoRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .build(); - - GetScmInfoResponseProto resp = submitRequest(Type.GetScmInfo, - builder -> builder.setGetScmInfoRequest(request)) - .getGetScmInfoResponse(); - ScmInfo.Builder builder = new ScmInfo.Builder() - .setClusterId(resp.getClusterId()) - .setScmId(resp.getScmId()); - return builder.build(); - - } - - /** - * Check if SCM is in safe mode. - * - * @return Returns true if SCM is in safe mode else returns false. - */ - @Override - public boolean inSafeMode() throws IOException { - InSafeModeRequestProto request = - InSafeModeRequestProto.getDefaultInstance(); - - return submitRequest(Type.InSafeMode, - builder -> builder.setInSafeModeRequest(request)) - .getInSafeModeResponse().getInSafeMode(); - - } - - /** - * Force SCM out of Safe mode. - * - * @return returns true if operation is successful. 
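The same proxy carries the pipeline and cluster-info queries removed in this file. A hedged sketch follows; the getId().getProtobuf() conversion and the import paths are assumptions made for illustration.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.ScmInfo;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
    import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

    public final class ScmPipelineOpsSketch {
      private ScmPipelineOpsSketch() { }

      // scmClient is again a hypothetical, already-constructed proxy.
      static void demo(StorageContainerLocationProtocol scmClient) throws IOException {
        // List the known pipelines and cycle the first one through
        // deactivate -> activate -> close.
        List<Pipeline> pipelines = scmClient.listPipelines();
        if (!pipelines.isEmpty()) {
          HddsProtos.PipelineID id = pipelines.get(0).getId().getProtobuf();
          scmClient.deactivatePipeline(id);
          scmClient.activatePipeline(id);
          scmClient.closePipeline(id);
        }

        // Basic cluster identity and safe-mode status.
        ScmInfo scmInfo = scmClient.getScmInfo();
        System.out.println("clusterId=" + scmInfo.getClusterId()
            + " scmId=" + scmInfo.getScmId()
            + " inSafeMode=" + scmClient.inSafeMode());
      }
    }
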
- */ - @Override - public boolean forceExitSafeMode() throws IOException { - ForceExitSafeModeRequestProto request = - ForceExitSafeModeRequestProto.getDefaultInstance(); - ForceExitSafeModeResponseProto resp = - submitRequest(Type.ForceExitSafeMode, - builder -> builder.setForceExitSafeModeRequest(request)) - .getForceExitSafeModeResponse(); - - return resp.getExitedSafeMode(); - - } - - @Override - public void startReplicationManager() throws IOException { - - StartReplicationManagerRequestProto request = - StartReplicationManagerRequestProto.getDefaultInstance(); - submitRequest(Type.StartReplicationManager, - builder -> builder.setStartReplicationManagerRequest(request)); - - } - - @Override - public void stopReplicationManager() throws IOException { - - StopReplicationManagerRequestProto request = - StopReplicationManagerRequestProto.getDefaultInstance(); - submitRequest(Type.StopReplicationManager, - builder -> builder.setStopReplicationManagerRequest(request)); - - } - - @Override - public boolean getReplicationManagerStatus() throws IOException { - - ReplicationManagerStatusRequestProto request = - ReplicationManagerStatusRequestProto.getDefaultInstance(); - ReplicationManagerStatusResponseProto response = - submitRequest(Type.GetReplicationManagerStatus, - builder -> builder.setSeplicationManagerStatusRequest(request)) - .getReplicationManagerStatusResponse(); - return response.getIsRunning(); - - } - - @Override - public Object getUnderlyingProxyObject() { - return rpcProxy; - } - - @Override - public void close() { - RPC.stopProxy(rpcProxy); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java deleted file mode 100644 index f0af7aaed8720..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.protocolPB; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerLocationProtocolProtos - .StorageContainerLocationProtocolService; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.security.KerberosInfo; - -/** - * Protocol used from an HDFS node to StorageContainerManager. This extends the - * Protocol Buffers service interface to add Hadoop-specific annotations. 
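The administrative calls removed just above (safe-mode exit and the replication manager start/stop/status toggles) are typically used together. A small sketch, built only from the method signatures visible in this translator and against the same hypothetical scmClient proxy:

    import java.io.IOException;

    import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

    public final class ScmAdminOpsSketch {
      private ScmAdminOpsSketch() { }

      static void demo(StorageContainerLocationProtocol scmClient) throws IOException {
        // Leave safe mode if SCM is still in it.
        if (scmClient.inSafeMode()) {
          boolean exited = scmClient.forceExitSafeMode();
          System.out.println("forced safe-mode exit: " + exited);
        }

        // Toggle the replication manager and report its state.
        scmClient.startReplicationManager();
        System.out.println("replication manager running: "
            + scmClient.getReplicationManagerStatus());
        scmClient.stopReplicationManager();
      }
    }
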
- */ -@ProtocolInfo(protocolName = - "org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol", - protocolVersion = 1) -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -@InterfaceAudience.Private -public interface StorageContainerLocationProtocolPB - extends StorageContainerLocationProtocolService.BlockingInterface { -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java deleted file mode 100644 index 652ae60973ca5..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.protocolPB; - -/** - * This package contains classes for the client of the storage container - * protocol. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/CheckedBiFunction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/CheckedBiFunction.java deleted file mode 100644 index df84859ab0294..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/CheckedBiFunction.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - - -import java.io.IOException; - -/** - * Defines a functional interface having two inputs which throws IOException. 
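CheckedBiFunction is declared with type parameters (LEFT, RIGHT, THROWABLE) that this rendering of the patch appears to have dropped from the interface line; its role in this file set is the request/response validator callback used further down in ContainerProtocolCalls.getValidatorList(). A self-contained sketch of the pattern, using a renamed stand-in interface with the same shape:

    import java.io.IOException;

    // Stand-in for CheckedBiFunction: a two-argument callback that may throw a
    // checked exception. Type parameter names follow the apply(...) signature
    // shown in the deleted file.
    @FunctionalInterface
    interface CheckedBiFunctionSketch<LEFT, RIGHT, THROWABLE extends Throwable> {
      void apply(LEFT left, RIGHT right) throws THROWABLE;
    }

    final class CheckedBiFunctionDemo {
      private CheckedBiFunctionDemo() { }

      static void demo() throws IOException {
        // Typical use: a request/response validator expressed as a lambda.
        CheckedBiFunctionSketch<String, Integer, IOException> validator =
            (request, status) -> {
              if (status != 0) {
                throw new IOException("request " + request + " failed: " + status);
              }
            };
        validator.apply("getBlock", 0);   // passes
        validator.apply("putBlock", 5);   // throws IOException
      }
    }
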
- */ -@FunctionalInterface -public interface CheckedBiFunction { - void apply(LEFT left, RIGHT right) throws THROWABLE; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java deleted file mode 100644 index d0ba60d9ad7e8..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ /dev/null @@ -1,573 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.container.common.helpers - .BlockNotCommittedException; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSelector; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .CloseContainerRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .DatanodeBlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetBlockRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetBlockResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetSmallFileResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .PutBlockRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .PutSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadChunkRequestProto; -import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadContainerRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadContainerResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .WriteChunkRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - PutSmallFileResponseProto; -import org.apache.hadoop.hdds.client.BlockID; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutionException; - -/** - * Implementation of all container protocol calls performed by Container - * clients. - */ -public final class ContainerProtocolCalls { - - /** - * There is no need to instantiate this class. - */ - private ContainerProtocolCalls() { - } - - /** - * Calls the container protocol to get a container block. - * - * @param xceiverClient client to perform call - * @param datanodeBlockID blockID to identify container - * @return container protocol get block response - * @throws IOException if there is an I/O error while performing the call - */ - public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, - DatanodeBlockID datanodeBlockID) throws IOException { - GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto - .newBuilder() - .setBlockID(datanodeBlockID); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - - ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto - .newBuilder() - .setCmdType(Type.GetBlock) - .setContainerID(datanodeBlockID.getContainerID()) - .setDatanodeUuid(id) - .setGetBlock(readBlockRequest); - String encodedToken = getEncodedBlockToken(getService(datanodeBlockID)); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - xceiverClient.sendCommand(request, getValidatorList()); - return response.getGetBlock(); - } - - /** - * Calls the container protocol to get the length of a committed block. - * - * @param xceiverClient client to perform call - * @param blockID blockId for the Block - * @return container protocol getLastCommittedBlockLength response - * @throws IOException if there is an I/O error while performing the call - */ - public static ContainerProtos.GetCommittedBlockLengthResponseProto - getCommittedBlockLength( - XceiverClientSpi xceiverClient, BlockID blockID) - throws IOException { - ContainerProtos.GetCommittedBlockLengthRequestProto.Builder - getBlockLengthRequestBuilder = - ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder(). - setBlockID(blockID.getDatanodeBlockIDProtobuf()); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder() - .setCmdType(Type.GetCommittedBlockLength) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id) - .setGetCommittedBlockLength(getBlockLengthRequestBuilder); - String encodedToken = getEncodedBlockToken(new Text(blockID. 
- getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - xceiverClient.sendCommand(request, getValidatorList()); - return response.getGetCommittedBlockLength(); - } - - /** - * Calls the container protocol to put a container block. - * - * @param xceiverClient client to perform call - * @param containerBlockData block data to identify container - * @return putBlockResponse - * @throws IOException if there is an I/O error while performing the call - */ - public static ContainerProtos.PutBlockResponseProto putBlock( - XceiverClientSpi xceiverClient, BlockData containerBlockData) - throws IOException { - PutBlockRequestProto.Builder createBlockRequest = - PutBlockRequestProto.newBuilder().setBlockData(containerBlockData); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder().setCmdType(Type.PutBlock) - .setContainerID(containerBlockData.getBlockID().getContainerID()) - .setDatanodeUuid(id) - .setPutBlock(createBlockRequest); - String encodedToken = - getEncodedBlockToken(getService(containerBlockData.getBlockID())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - xceiverClient.sendCommand(request, getValidatorList()); - return response.getPutBlock(); - } - - /** - * Calls the container protocol to put a container block. - * - * @param xceiverClient client to perform call - * @param containerBlockData block data to identify container - * @return putBlockResponse - * @throws IOException if there is an error while performing the call - * @throws InterruptedException - * @throws ExecutionException - */ - public static XceiverClientReply putBlockAsync( - XceiverClientSpi xceiverClient, BlockData containerBlockData) - throws IOException, InterruptedException, ExecutionException { - PutBlockRequestProto.Builder createBlockRequest = - PutBlockRequestProto.newBuilder().setBlockData(containerBlockData); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder().setCmdType(Type.PutBlock) - .setContainerID(containerBlockData.getBlockID().getContainerID()) - .setDatanodeUuid(id) - .setPutBlock(createBlockRequest); - String encodedToken = - getEncodedBlockToken(getService(containerBlockData.getBlockID())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - return xceiverClient.sendCommandAsync(request); - } - - /** - * Calls the container protocol to read a chunk. 
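The block-level helpers above are static calls against an already-connected XceiverClientSpi. A sketch of a put/get round trip assembled only from the signatures visible in this file; the client setup and the exact proto fields populated are assumptions.

    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto;
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;
    import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

    public final class BlockCallsSketch {
      private BlockCallsSketch() { }

      // xceiverClient is a hypothetical, already-connected client for the pipeline.
      static void demo(XceiverClientSpi xceiverClient) throws IOException {
        DatanodeBlockID blockId = DatanodeBlockID.newBuilder()
            .setContainerID(1L)
            .setLocalID(100L)
            .build();

        // Register the block metadata on the datanode...
        BlockData blockData = BlockData.newBuilder().setBlockID(blockId).build();
        ContainerProtocolCalls.putBlock(xceiverClient, blockData);

        // ...then read it back.
        GetBlockResponseProto response =
            ContainerProtocolCalls.getBlock(xceiverClient, blockId);
        System.out.println("got block with localID "
            + response.getBlockData().getBlockID().getLocalID());
      }
    }
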
- * - * @param xceiverClient client to perform call - * @param chunk information about chunk to read - * @param blockID ID of the block - * @param validators functions to validate the response - * @return container protocol read chunk response - * @throws IOException if there is an I/O error while performing the call - */ - public static ContainerProtos.ReadChunkResponseProto readChunk( - XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, - List validators) throws IOException { - ReadChunkRequestProto.Builder readChunkRequest = - ReadChunkRequestProto.newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .setChunkData(chunk); - String id = xceiverClient.getPipeline().getClosestNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder().setCmdType(Type.ReadChunk) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id).setReadChunk(readChunkRequest); - String encodedToken = getEncodedBlockToken(new Text(blockID. - getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto reply = - xceiverClient.sendCommand(request, validators); - return reply.getReadChunk(); - } - - /** - * Calls the container protocol to write a chunk. - * - * @param xceiverClient client to perform call - * @param chunk information about chunk to write - * @param blockID ID of the block - * @param data the data of the chunk to write - * @throws IOException if there is an error while performing the call - */ - public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk, - BlockID blockID, ByteString data) - throws IOException { - WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto - .newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .setChunkData(chunk) - .setData(data); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto - .newBuilder() - .setCmdType(Type.WriteChunk) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id) - .setWriteChunk(writeChunkRequest); - String encodedToken = getEncodedBlockToken(new Text(blockID. - getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - xceiverClient.sendCommand(request, getValidatorList()); - } - - /** - * Calls the container protocol to write a chunk. 
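writeChunk and readChunk follow the same pattern, with a ChunkInfo describing the byte range and its checksum. The chunk naming and checksum setup below mirror what writeSmallFile does further down; the client is again assumed to exist, and the chunk name is arbitrary.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;
    import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
    import org.apache.hadoop.ozone.common.Checksum;
    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

    public final class ChunkCallsSketch {
      private ChunkCallsSketch() { }

      static void demo(XceiverClientSpi xceiverClient) throws IOException {
        BlockID blockID = new BlockID(1L, 100L);
        byte[] bytes = "hello chunk".getBytes(StandardCharsets.UTF_8);

        // Describe the chunk: name, offset, length and checksum.
        ChunkInfo chunk = ChunkInfo.newBuilder()
            .setChunkName(blockID.getLocalID() + "_chunk_0")
            .setOffset(0)
            .setLen(bytes.length)
            .setChecksumData(
                new Checksum().computeChecksum(bytes, 0, bytes.length)
                    .getProtoBufMessage())
            .build();

        // Write the chunk, then read the same range back through the helper,
        // reusing the default validators from ContainerProtocolCalls.
        ContainerProtocolCalls.writeChunk(
            xceiverClient, chunk, blockID, ByteString.copyFrom(bytes));
        ContainerProtos.ReadChunkResponseProto read = ContainerProtocolCalls.readChunk(
            xceiverClient, chunk, blockID, ContainerProtocolCalls.getValidatorList());
        System.out.println("read back " + read.getData().size() + " bytes");
      }
    }
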
- * - * @param xceiverClient client to perform call - * @param chunk information about chunk to write - * @param blockID ID of the block - * @param data the data of the chunk to write - * @throws IOException if there is an I/O error while performing the call - */ - public static XceiverClientReply writeChunkAsync( - XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, - ByteString data) - throws IOException, ExecutionException, InterruptedException { - WriteChunkRequestProto.Builder writeChunkRequest = - WriteChunkRequestProto.newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .setChunkData(chunk).setData(data); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder().setCmdType(Type.WriteChunk) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id).setWriteChunk(writeChunkRequest); - String encodedToken = getEncodedBlockToken(new Text(blockID. - getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - return xceiverClient.sendCommandAsync(request); - } - - /** - * Allows writing a small file using single RPC. This takes the container - * name, block name and data to write sends all that data to the container - * using a single RPC. This API is designed to be used for files which are - * smaller than 1 MB. - * - * @param client - client that communicates with the container. - * @param blockID - ID of the block - * @param data - Data to be written into the container. - * @return container protocol writeSmallFile response - * @throws IOException - */ - public static PutSmallFileResponseProto writeSmallFile( - XceiverClientSpi client, BlockID blockID, byte[] data) - throws IOException { - - BlockData containerBlockData = - BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .build(); - PutBlockRequestProto.Builder createBlockRequest = - PutBlockRequestProto.newBuilder() - .setBlockData(containerBlockData); - - KeyValue keyValue = - KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true") - .build(); - Checksum checksum = new Checksum(); - ChecksumData checksumData = checksum.computeChecksum(data, 0, data.length); - ChunkInfo chunk = - ChunkInfo.newBuilder() - .setChunkName(blockID.getLocalID() + "_chunk") - .setOffset(0) - .setLen(data.length) - .addMetadata(keyValue) - .setChecksumData(checksumData.getProtoBufMessage()) - .build(); - - PutSmallFileRequestProto putSmallFileRequest = - PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk) - .setBlock(createBlockRequest).setData(ByteString.copyFrom(data)) - .build(); - - String id = client.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder() - .setCmdType(Type.PutSmallFile) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id) - .setPutSmallFile(putSmallFileRequest); - String encodedToken = getEncodedBlockToken(new Text(blockID. - getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - client.sendCommand(request, getValidatorList()); - return response.getPutSmallFile(); - } - - /** - * createContainer call that creates a container on the datanode. 
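For payloads small enough for a single RPC, the small-file helpers bundle the block and chunk into one call. A minimal round trip under the same assumptions (pre-built client, placeholder IDs):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetSmallFileResponseProto;
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;
    import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

    public final class SmallFileSketch {
      private SmallFileSketch() { }

      static void demo(XceiverClientSpi client) throws IOException {
        BlockID blockID = new BlockID(1L, 101L);
        byte[] payload = "small file payload".getBytes(StandardCharsets.UTF_8);

        // One RPC writes block metadata and data together (intended for < 1 MB).
        ContainerProtocolCalls.writeSmallFile(client, blockID, payload);

        // And one RPC reads it back.
        GetSmallFileResponseProto smallFile =
            ContainerProtocolCalls.readSmallFile(client, blockID);
        System.out.println("readSmallFile response: "
            + smallFile.getSerializedSize() + " serialized bytes");
      }
    }
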
- * @param client - client - * @param containerID - ID of container - * @param encodedToken - encodedToken if security is enabled - * @throws IOException - */ - public static void createContainer(XceiverClientSpi client, long containerID, - String encodedToken) throws IOException { - ContainerProtos.CreateContainerRequestProto.Builder createRequest = - ContainerProtos.CreateContainerRequestProto - .newBuilder(); - createRequest.setContainerType(ContainerProtos.ContainerType - .KeyValueContainer); - - String id = client.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); - if (encodedToken != null) { - request.setEncodedToken(encodedToken); - } - request.setCmdType(ContainerProtos.Type.CreateContainer); - request.setContainerID(containerID); - request.setCreateContainer(createRequest.build()); - request.setDatanodeUuid(id); - client.sendCommand(request.build(), getValidatorList()); - } - - /** - * Deletes a container from a pipeline. - * - * @param client - * @param force whether or not to forcibly delete the container. - * @param encodedToken - encodedToken if security is enabled - * @throws IOException - */ - public static void deleteContainer(XceiverClientSpi client, long containerID, - boolean force, String encodedToken) throws IOException { - ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest = - ContainerProtos.DeleteContainerRequestProto.newBuilder(); - deleteRequest.setForceDelete(force); - String id = client.getPipeline().getFirstNode().getUuidString(); - - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.DeleteContainer); - request.setContainerID(containerID); - request.setDeleteContainer(deleteRequest); - request.setDatanodeUuid(id); - if (encodedToken != null) { - request.setEncodedToken(encodedToken); - } - client.sendCommand(request.build(), getValidatorList()); - } - - /** - * Close a container. - * - * @param client - * @param containerID - * @param encodedToken - encodedToken if security is enabled - * @throws IOException - */ - public static void closeContainer(XceiverClientSpi client, - long containerID, String encodedToken) - throws IOException { - String id = client.getPipeline().getFirstNode().getUuidString(); - - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(Type.CloseContainer); - request.setContainerID(containerID); - request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance()); - request.setDatanodeUuid(id); - if(encodedToken != null) { - request.setEncodedToken(encodedToken); - } - client.sendCommand(request.build(), getValidatorList()); - } - - /** - * readContainer call that gets meta data from an existing container. 
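The container lifecycle helpers take the numeric container ID plus an optional encoded token; passing null matches the unsecured path in the code above, which only attaches the token when it is non-null. A sketch:

    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadContainerResponseProto;
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;
    import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

    public final class ContainerLifecycleSketch {
      private ContainerLifecycleSketch() { }

      static void demo(XceiverClientSpi client, long containerID) throws IOException {
        // No security enabled in this sketch, so the encoded token is null.
        ContainerProtocolCalls.createContainer(client, containerID, null);

        // Inspect the container metadata, then close and force-delete it.
        ReadContainerResponseProto meta =
            ContainerProtocolCalls.readContainer(client, containerID, null);
        System.out.println("container metadata:\n" + meta);

        ContainerProtocolCalls.closeContainer(client, containerID, null);
        ContainerProtocolCalls.deleteContainer(client, containerID, true, null);
      }
    }
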
- * - * @param client - client - * @param encodedToken - encodedToken if security is enabled - * @throws IOException - */ - public static ReadContainerResponseProto readContainer( - XceiverClientSpi client, long containerID, String encodedToken) - throws IOException { - String id = client.getPipeline().getFirstNode().getUuidString(); - - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(Type.ReadContainer); - request.setContainerID(containerID); - request.setReadContainer(ReadContainerRequestProto.getDefaultInstance()); - request.setDatanodeUuid(id); - if(encodedToken != null) { - request.setEncodedToken(encodedToken); - } - ContainerCommandResponseProto response = - client.sendCommand(request.build(), getValidatorList()); - - return response.getReadContainer(); - } - - /** - * Reads the data given the blockID. - * - * @param client - * @param blockID - ID of the block - * @return GetSmallFileResponseProto - * @throws IOException - */ - public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, - BlockID blockID) throws IOException { - GetBlockRequestProto.Builder getBlock = GetBlockRequestProto - .newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()); - ContainerProtos.GetSmallFileRequestProto getSmallFileRequest = - GetSmallFileRequestProto - .newBuilder().setBlock(getBlock) - .build(); - String id = client.getPipeline().getClosestNode().getUuidString(); - - ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto - .newBuilder() - .setCmdType(Type.GetSmallFile) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id) - .setGetSmallFile(getSmallFileRequest); - String encodedToken = getEncodedBlockToken(new Text(blockID. - getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - client.sendCommand(request, getValidatorList()); - return response.getGetSmallFile(); - } - - /** - * Validates a response from a container protocol call. Any non-successful - * return code is mapped to a corresponding exception and thrown. - * - * @param response container protocol call response - * @throws StorageContainerException if the container protocol call failed - */ - public static void validateContainerResponse( - ContainerCommandResponseProto response - ) throws StorageContainerException { - if (response.getResult() == ContainerProtos.Result.SUCCESS) { - return; - } else if (response.getResult() - == ContainerProtos.Result.BLOCK_NOT_COMMITTED) { - throw new BlockNotCommittedException(response.getMessage()); - } else if (response.getResult() - == ContainerProtos.Result.CLOSED_CONTAINER_IO) { - throw new ContainerNotOpenException(response.getMessage()); - } - throw new StorageContainerException( - response.getMessage(), response.getResult()); - } - - /** - * Returns a url encoded block token. Service param should match the service - * field of token. 
- * @param service - * - * */ - private static String getEncodedBlockToken(Text service) - throws IOException { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - Token token = - OzoneBlockTokenSelector.selectBlockToken(service, ugi.getTokens()); - if (token != null) { - return token.encodeToUrlString(); - } - return null; - } - - private static Text getService(DatanodeBlockID blockId) { - return new Text(new StringBuffer() - .append("conID: ") - .append(blockId.getContainerID()) - .append(" locID: ") - .append(blockId.getLocalID()) - .toString()); - } - - public static List getValidatorList() { - List validators = new ArrayList<>(1); - CheckedBiFunction - validator = (request, response) -> validateContainerResponse(response); - validators.add(validator); - return validators; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java deleted file mode 100644 index 8e981586bd665..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -/** - * This package contains StorageContainerManager classes. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java deleted file mode 100644 index bbe25a9d840c2..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.security.exception; - -import java.io.IOException; - -/** - * Root Security Exception call for all Certificate related Execptions. 
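validateContainerResponse maps non-success result codes onto typed exceptions, and getValidatorList wires it in as the default validator for every helper above. A sketch of catching those mapped errors around a raw sendCommand; the request construction is elided and the client is assumed.

    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;
    import org.apache.hadoop.hdds.scm.container.common.helpers.BlockNotCommittedException;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
    import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

    public final class ResponseValidationSketch {
      private ResponseValidationSketch() { }

      static void demo(XceiverClientSpi client, ContainerCommandRequestProto request)
          throws IOException {
        try {
          // The bundled validator throws as soon as the result is not SUCCESS.
          ContainerCommandResponseProto response =
              client.sendCommand(request, ContainerProtocolCalls.getValidatorList());
          System.out.println("result: " + response.getResult());
        } catch (BlockNotCommittedException e) {
          // BLOCK_NOT_COMMITTED: retry once the block is committed.
          throw e;
        } catch (ContainerNotOpenException e) {
          // CLOSED_CONTAINER_IO: the container was closed under us.
          throw e;
        } catch (StorageContainerException e) {
          // Any other non-success result code.
          throw e;
        }
      }
    }
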
- */ -public class SCMSecurityException extends IOException { - private final ErrorCode errorCode; - - /** - * Ctor. - * @param message - Error Message. - */ - public SCMSecurityException(String message) { - super(message); - this.errorCode = ErrorCode.DEFAULT; - } - - /** - * Ctor. - * @param message - Message. - * @param cause - Actual cause. - */ - public SCMSecurityException(String message, Throwable cause) { - super(message, cause); - this.errorCode = ErrorCode.DEFAULT; - } - - /** - * Ctor. - * @param message - Message. - * @param error - error code. - */ - public SCMSecurityException(String message, ErrorCode error) { - super(message); - this.errorCode = error; - } - - /** - * Ctor. - * @param cause - Base Exception. - */ - public SCMSecurityException(Throwable cause) { - super(cause); - this.errorCode = ErrorCode.DEFAULT; - } - - public ErrorCode getErrorCode() { - return errorCode; - } - - /** - * Error codes to make it easy to decode these exceptions. - */ - public enum ErrorCode { - DEFAULT, - MISSING_BLOCK_TOKEN, - BLOCK_TOKEN_VERIFICATION_FAILED - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java deleted file mode 100644 index b9805925adfd5..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Exceptions thrown by SCM security classes. - */ -package org.apache.hadoop.hdds.security.exception; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenException.java deleted file mode 100644 index 7ea0ebcf21e0a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenException.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.token; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; - -/** - * Block Token Exceptions from the SCM Security layer. - */ -public class BlockTokenException extends SCMSecurityException { - - /** - * Ctor. - * @param message - Error Message. - */ - public BlockTokenException(String message) { - super(message); - } - - /** - * Ctor. - * @param message - Message. - * @param cause - Actual cause. - */ - public BlockTokenException(String message, Throwable cause) { - super(message, cause); - } - - /** - * Ctor. - * @param cause - Base Exception. - */ - public BlockTokenException(Throwable cause) { - super(cause); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java deleted file mode 100644 index e94808ac9d7d1..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.security.token; - -import com.google.common.base.Strings; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.IOException; -import java.security.cert.X509Certificate; - - -/** - * Verify token and return a UGI with token if authenticated. - */ -public class BlockTokenVerifier implements TokenVerifier { - - private final CertificateClient caClient; - private final SecurityConfig conf; - private static boolean testStub = false; - private final static Logger LOGGER = - LoggerFactory.getLogger(BlockTokenVerifier.class); - - public BlockTokenVerifier(SecurityConfig conf, CertificateClient caClient) { - this.conf = conf; - this.caClient = caClient; - } - - private boolean isExpired(long expiryDate) { - return Time.now() > expiryDate; - } - - @Override - public UserGroupInformation verify(String user, String tokenStr) - throws SCMSecurityException { - if (conf.isBlockTokenEnabled()) { - // TODO: add audit logs. 
- - if (Strings.isNullOrEmpty(tokenStr)) { - throw new BlockTokenException("Fail to find any token (empty or " + - "null.)"); - } - final Token token = new Token(); - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(); - try { - token.decodeFromUrlString(tokenStr); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Verifying token:{} for user:{} ", token, user); - } - ByteArrayInputStream buf = new ByteArrayInputStream( - token.getIdentifier()); - DataInputStream in = new DataInputStream(buf); - tokenId.readFields(in); - - } catch (IOException ex) { - throw new BlockTokenException("Failed to decode token : " + tokenStr); - } - - if (caClient == null) { - throw new SCMSecurityException("Certificate client not available " + - "to validate token"); - } - - X509Certificate singerCert; - singerCert = caClient.getCertificate(tokenId.getOmCertSerialId()); - - if (singerCert == null) { - throw new BlockTokenException("Can't find signer certificate " + - "(OmCertSerialId: " + tokenId.getOmCertSerialId() + - ") of the block token for user: " + tokenId.getUser()); - } - boolean validToken = caClient.verifySignature(tokenId.getBytes(), - token.getPassword(), singerCert); - if (!validToken) { - throw new BlockTokenException("Invalid block token for user: " + - tokenId.getUser()); - } - - // check expiration - if (isExpired(tokenId.getExpiryDate())) { - UserGroupInformation tokenUser = tokenId.getUser(); - tokenUser.setAuthenticationMethod( - UserGroupInformation.AuthenticationMethod.TOKEN); - throw new BlockTokenException("Expired block token for user: " + - tokenUser); - } - // defer access mode, bcsid and maxLength check to container dispatcher - UserGroupInformation ugi = tokenId.getUser(); - ugi.addToken(token); - ugi.setAuthenticationMethod(UserGroupInformation - .AuthenticationMethod.TOKEN); - return ugi; - } else { - return UserGroupInformation.createRemoteUser(user); - } - } - - public static boolean isTestStub() { - return testStub; - } - - // For testing purpose only. - public static void setTestStub(boolean isTestStub) { - BlockTokenVerifier.testStub = isTestStub; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java deleted file mode 100644 index 54cf18002c37e..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java +++ /dev/null @@ -1,212 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.security.token; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.Builder; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.security.token.Token.TrivialRenewer; - -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.DataOutput; -import java.io.IOException; -import java.util.EnumSet; - -/** - * Block token identifier for Ozone/HDDS. Ozone block access token is similar - * to HDFS block access token, which is meant to be lightweight and - * short-lived. No need to renew or revoke a block access token. when a - * cached block access token expires, the client simply get a new one. - * Block access token should be cached only in memory and never write to disk. - */ -@InterfaceAudience.Private -public class OzoneBlockTokenIdentifier extends TokenIdentifier { - - static final Text KIND_NAME = new Text("HDDS_BLOCK_TOKEN"); - private long expiryDate; - private String ownerId; - private String blockId; - private EnumSet modes; - private String omCertSerialId; - private long maxLength; - - public OzoneBlockTokenIdentifier() { - } - - public OzoneBlockTokenIdentifier(String ownerId, String blockId, - EnumSet modes, long expiryDate, String omCertSerialId, - long maxLength) { - this.ownerId = ownerId; - this.blockId = blockId; - this.expiryDate = expiryDate; - this.modes = modes == null ? EnumSet.noneOf(AccessModeProto.class) : modes; - this.omCertSerialId = omCertSerialId; - this.maxLength = maxLength; - } - - @Override - public UserGroupInformation getUser() { - if (this.getOwnerId() == null || "".equals(this.getOwnerId())) { - return UserGroupInformation.createRemoteUser(blockId); - } - return UserGroupInformation.createRemoteUser(ownerId); - } - - public long getExpiryDate() { - return expiryDate; - } - - public String getOwnerId() { - return ownerId; - } - - public String getBlockId() { - return blockId; - } - - public EnumSet getAccessModes() { - return modes; - } - - public String getOmCertSerialId(){ - return omCertSerialId; - } - - public long getMaxLength() { - return maxLength; - } - - @Override - public Text getKind() { - return KIND_NAME; - } - - @Override - public String toString() { - return "block_token_identifier (expiryDate=" + this.getExpiryDate() - + ", ownerId=" + this.getOwnerId() - + ", omCertSerialId=" + this.getOmCertSerialId() - + ", blockId=" + this.getBlockId() + ", access modes=" - + this.getAccessModes() + ", maxLength=" + this.getMaxLength() + ")"; - } - - static boolean isEqual(Object a, Object b) { - return a == null ? 
b == null : a.equals(b); - } - - @Override - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - - if (obj instanceof OzoneBlockTokenIdentifier) { - OzoneBlockTokenIdentifier that = (OzoneBlockTokenIdentifier) obj; - return new EqualsBuilder() - .append(this.expiryDate, that.expiryDate) - .append(this.ownerId, that.ownerId) - .append(this.blockId, that.blockId) - .append(this.modes, that.modes) - .append(this.omCertSerialId, that.omCertSerialId) - .append(this.maxLength, that.maxLength) - .build(); - } - return false; - } - - @Override - public int hashCode() { - return new HashCodeBuilder(133, 567) - .append(this.expiryDate) - .append(this.blockId) - .append(this.ownerId) - .append(this.modes) - .append(this.omCertSerialId) - .append(this.maxLength) - .build(); - } - - @Override - public void readFields(DataInput in) throws IOException { - final DataInputStream dis = (DataInputStream) in; - if (!dis.markSupported()) { - throw new IOException("Could not peek first byte."); - } - BlockTokenSecretProto tokenPtoto = - BlockTokenSecretProto.parseFrom((DataInputStream) in); - this.ownerId = tokenPtoto.getOwnerId(); - this.blockId = tokenPtoto.getBlockId(); - this.modes = EnumSet.copyOf(tokenPtoto.getModesList()); - this.expiryDate = tokenPtoto.getExpiryDate(); - this.omCertSerialId = tokenPtoto.getOmCertSerialId(); - this.maxLength = tokenPtoto.getMaxLength(); - } - - @VisibleForTesting - public static OzoneBlockTokenIdentifier readFieldsProtobuf(DataInput in) - throws IOException { - BlockTokenSecretProto tokenPtoto = - BlockTokenSecretProto.parseFrom((DataInputStream) in); - return new OzoneBlockTokenIdentifier(tokenPtoto.getOwnerId(), - tokenPtoto.getBlockId(), EnumSet.copyOf(tokenPtoto.getModesList()), - tokenPtoto.getExpiryDate(), tokenPtoto.getOmCertSerialId(), - tokenPtoto.getMaxLength()); - } - - @Override - public void write(DataOutput out) throws IOException { - writeProtobuf(out); - } - - @VisibleForTesting - void writeProtobuf(DataOutput out) throws IOException { - Builder builder = BlockTokenSecretProto.newBuilder() - .setBlockId(this.getBlockId()) - .setOwnerId(this.getOwnerId()) - .setOmCertSerialId(this.getOmCertSerialId()) - .setExpiryDate(this.getExpiryDate()) - .setMaxLength(this.getMaxLength()); - // Add access mode allowed - for (AccessModeProto mode : this.getAccessModes()) { - builder.addModes(AccessModeProto.valueOf(mode.name())); - } - out.write(builder.build().toByteArray()); - } - - /** - * Default TrivialRenewer. - */ - @InterfaceAudience.Private - public static class Renewer extends TrivialRenewer { - - @Override - protected Text getKind() { - return KIND_NAME; - } - } -} - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java deleted file mode 100644 index 9acc75ae17078..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.security.token; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.security.token.TokenSelector; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collection; - -/** - * A block token selector for Ozone. - */ -@InterfaceAudience.Private -public class OzoneBlockTokenSelector implements - TokenSelector { - - private static final Logger LOG = LoggerFactory - .getLogger(OzoneBlockTokenSelector.class); - - @Override - @SuppressWarnings("unchecked") - public Token selectToken(Text service, - Collection> tokens) { - if (service == null) { - return null; - } - for (Token token : tokens) { - if (OzoneBlockTokenIdentifier.KIND_NAME.equals(token.getKind()) - && token.getService().equals(service)) { - if (LOG.isTraceEnabled()) { - LOG.trace("Getting token for service:{}", service); - } - return (Token) token; - } - } - return null; - } - - /** - * Static method to avoid instantiation. - * */ - @SuppressWarnings("unchecked") - public static Token selectBlockToken(Text service, - Collection> tokens) { - if (service == null) { - return null; - } - for (Token token : tokens) { - if (OzoneBlockTokenIdentifier.KIND_NAME.equals(token.getKind()) - && token.getService().equals(service)) { - if (LOG.isTraceEnabled()) { - LOG.trace("Getting token for service:{}", service); - } - return (Token) token; - } - } - return null; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java deleted file mode 100644 index d8170abe8170e..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.security.token; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.security.UserGroupInformation; - -/** - * Ozone GRPC token header verifier. 
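The identifier and selector removed above plug into the standard Hadoop Token plumbing. The sketch below builds an identifier, wraps it in a token of the HDDS_BLOCK_TOKEN kind, and selects it back out of a credential collection; the field values and the password are placeholders, since the real secret comes from the OM's block token secret manager.

    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.EnumSet;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
    import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
    import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSelector;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;
    import org.apache.hadoop.util.Time;

    public final class BlockTokenSketch {
      private BlockTokenSketch() { }

      static void demo() {
        // Identifier for read/write access to one block, valid for an hour.
        OzoneBlockTokenIdentifier id = new OzoneBlockTokenIdentifier(
            "testUser", "conID: 1 locID: 100",
            EnumSet.of(AccessModeProto.READ, AccessModeProto.WRITE),
            Time.now() + 60L * 60L * 1000L,
            "omCertSerial-1", 4L * 1024L * 1024L);

        // Wrap it in a Token; a dummy secret stands in for the real password.
        Text service = new Text("conID: 1 locID: 100");
        Token<OzoneBlockTokenIdentifier> token = new Token<>(
            id.getBytes(),
            "not-a-real-secret".getBytes(StandardCharsets.UTF_8),
            id.getKind(), service);

        // Select it back out of a credential collection by service name.
        Collection<Token<? extends TokenIdentifier>> tokens = new ArrayList<>();
        tokens.add(token);
        Token<?> selected = OzoneBlockTokenSelector.selectBlockToken(service, tokens);
        System.out.println("selected token kind: " + selected.getKind());
      }
    }
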
- */ -public interface TokenVerifier { - /** - * Given a user and tokenStr header, return a UGI object with token if - * verified. - * @param user user of the request - * @param tokenStr token str of the request - * @return UGI - * @throws SCMSecurityException - */ - UserGroupInformation verify(String user, String tokenStr) - throws SCMSecurityException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/package-info.java deleted file mode 100644 index 885bed580c0fe..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains the block token related test classes. - */ -package org.apache.hadoop.hdds.security.token; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java deleted file mode 100644 index 8aaba5df999cc..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java +++ /dev/null @@ -1,371 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.security.x509; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslProvider; -import org.bouncycastle.jce.provider.BouncyCastleProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.Provider; -import java.security.Security; -import java.time.Duration; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_ALGORITHM; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_LEN; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_SECURITY_PROVIDER; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_ENABLED; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_ENABLED_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_PROVIDER; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_PROVIDER_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT_DEFAULT; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_ALGORITHM; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_LEN; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_PROVIDER; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DEFAULT_DURATION_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DEFAULT_DURATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DIR_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DIR_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_FILE_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_FILE_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_MAX_DURATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_MAX_DURATION_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_SIGNATURE_ALGO; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_SIGNATURE_ALGO_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; - -/** - * A class that deals with all Security related configs in HDDS. - *
- * This class allows security configs to be read and used consistently across - * all of security related code base. - */ -public class SecurityConfig { - private static final Logger LOG = - LoggerFactory.getLogger(SecurityConfig.class); - private static volatile Provider provider; - private final Configuration configuration; - private final int size; - private final String keyAlgo; - private final String providerString; - private final String metadatDir; - private final String keyDir; - private final String privateKeyFileName; - private final String publicKeyFileName; - private final Duration certDuration; - private final String x509SignatureAlgo; - private final boolean blockTokenEnabled; - private final String certificateDir; - private final String certificateFileName; - private final boolean grpcTlsEnabled; - private boolean grpcTlsUseTestCert; - private final Duration defaultCertDuration; - private final boolean isSecurityEnabled; - - /** - * Constructs a SecurityConfig. - * - * @param configuration - HDDS Configuration - */ - public SecurityConfig(Configuration configuration) { - Preconditions.checkNotNull(configuration, "Configuration cannot be null"); - this.configuration = configuration; - this.size = this.configuration.getInt(HDDS_KEY_LEN, HDDS_DEFAULT_KEY_LEN); - this.keyAlgo = this.configuration.get(HDDS_KEY_ALGORITHM, - HDDS_DEFAULT_KEY_ALGORITHM); - this.providerString = this.configuration.get(HDDS_SECURITY_PROVIDER, - HDDS_DEFAULT_SECURITY_PROVIDER); - - // Please Note: To make it easy for our customers we will attempt to read - // HDDS metadata dir and if that is not set, we will use Ozone directory. - // TODO: We might want to fix this later. - this.metadatDir = this.configuration.get(HDDS_METADATA_DIR_NAME, - configuration.get(OZONE_METADATA_DIRS, - configuration.get(HDDS_DATANODE_DIR_KEY))); - this.keyDir = this.configuration.get(HDDS_KEY_DIR_NAME, - HDDS_KEY_DIR_NAME_DEFAULT); - this.privateKeyFileName = this.configuration.get(HDDS_PRIVATE_KEY_FILE_NAME, - HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT); - this.publicKeyFileName = this.configuration.get(HDDS_PUBLIC_KEY_FILE_NAME, - HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT); - - String durationString = this.configuration.get(HDDS_X509_MAX_DURATION, - HDDS_X509_MAX_DURATION_DEFAULT); - this.certDuration = Duration.parse(durationString); - this.x509SignatureAlgo = this.configuration.get(HDDS_X509_SIGNATURE_ALGO, - HDDS_X509_SIGNATURE_ALGO_DEFAULT); - this.certificateDir = this.configuration.get(HDDS_X509_DIR_NAME, - HDDS_X509_DIR_NAME_DEFAULT); - this.certificateFileName = this.configuration.get(HDDS_X509_FILE_NAME, - HDDS_X509_FILE_NAME_DEFAULT); - - this.blockTokenEnabled = this.configuration.getBoolean( - HDDS_BLOCK_TOKEN_ENABLED, - HDDS_BLOCK_TOKEN_ENABLED_DEFAULT); - - this.grpcTlsEnabled = this.configuration.getBoolean(HDDS_GRPC_TLS_ENABLED, - HDDS_GRPC_TLS_ENABLED_DEFAULT); - - if (grpcTlsEnabled) { - this.grpcTlsUseTestCert = this.configuration.getBoolean( - HDDS_GRPC_TLS_TEST_CERT, HDDS_GRPC_TLS_TEST_CERT_DEFAULT); - } - - this.isSecurityEnabled = this.configuration.getBoolean( - OZONE_SECURITY_ENABLED_KEY, - OZONE_SECURITY_ENABLED_DEFAULT); - - String certDurationString = - this.configuration.get(HDDS_X509_DEFAULT_DURATION, - HDDS_X509_DEFAULT_DURATION_DEFAULT); - defaultCertDuration = Duration.parse(certDurationString); - - - // First Startup -- if the provider is null, check for the provider. 
- if (SecurityConfig.provider == null) { - synchronized (SecurityConfig.class) { - provider = Security.getProvider(this.providerString); - if (SecurityConfig.provider == null) { - // Provider not found, let us try to Dynamically initialize the - // provider. - provider = initSecurityProvider(this.providerString); - } - } - } - } - - /** - * Returns true if security is enabled for OzoneCluster. This is determined - * by value of OZONE_SECURITY_ENABLED_KEY. - * - * @return true if security is enabled for OzoneCluster. - */ - public boolean isSecurityEnabled() { - return isSecurityEnabled; - } - - /** - * Returns the Default Certificate Duration. - * - * @return Duration for the default certificate issue. - */ - public Duration getDefaultCertDuration() { - return defaultCertDuration; - } - - /** - * Returns the Standard Certificate file name. - * - * @return String - Name of the Certificate File. - */ - public String getCertificateFileName() { - return certificateFileName; - } - - /** - * Returns the public key file name, This is used for storing the public keys - * on disk. - * - * @return String, File name used for public keys. - */ - public String getPublicKeyFileName() { - return publicKeyFileName; - } - - /** - * Returns the private key file name.This is used for storing the private keys - * on disk. - * - * @return String, File name used for private keys. - */ - public String getPrivateKeyFileName() { - return privateKeyFileName; - } - - /** - * Returns the File path to where keys are stored with an additional component - * name inserted in between. - * - * @param component - Component Name - String. - * @return Path Key location. - */ - public Path getKeyLocation(String component) { - Preconditions.checkNotNull(this.metadatDir, "Metadata directory can't be" - + " null. Please check configs."); - return Paths.get(metadatDir, component, keyDir); - } - - /** - * Returns the File path to where certificates are stored with an addition - * component - * name inserted in between. - * - * @param component - Component Name - String. - * @return Path location. - */ - public Path getCertificateLocation(String component) { - Preconditions.checkNotNull(this.metadatDir, "Metadata directory can't be" - + " null. Please check configs."); - return Paths.get(metadatDir, component, certificateDir); - } - - /** - * Gets the Key Size, The default key size is 2048, since the default - * algorithm used is RSA. User can change this by setting the "hdds.key.len" - * in configuration. - * - * @return key size. - */ - public int getSize() { - return size; - } - - /** - * Returns the Provider name. SCM defaults to using Bouncy Castle and will - * return "BC". - * - * @return String Provider name. - */ - public String getProvider() { - return providerString; - } - - /** - * Returns the Key generation Algorithm used. User can change this by setting - * the "hdds.key.algo" in configuration. - * - * @return String Algo. - */ - public String getKeyAlgo() { - return keyAlgo; - } - - /** - * Returns the X.509 Signature Algorithm used. This can be changed by setting - * "hdds.x509.signature.algorithm" to the new name. The default algorithm is - * SHA256withRSA. - * - * @return String - */ - public String getSignatureAlgo() { - return x509SignatureAlgo; - } - - /** - * Returns the Configuration used for initializing this SecurityConfig. - * - * @return Configuration - */ - public Configuration getConfiguration() { - return configuration; - } - - /** - * Returns the maximum length a certificate can be valid in SCM. 
The default - * value is 5 years. This can be changed by setting "hdds.x509.max.duration" - * in configuration. The formats accepted are based on the ISO-8601 duration - * format PnDTnHnMn.nS - *
- * Default value is 5 years and written as P1865D. - * - * @return Duration. - */ - public Duration getMaxCertificateDuration() { - return this.certDuration; - } - - public boolean isBlockTokenEnabled() { - return this.blockTokenEnabled; - } - - /** - * Returns true if TLS is enabled for gRPC services. - * @return true if TLS is enabled for gRPC services. - */ - public boolean isGrpcTlsEnabled() { - return this.grpcTlsEnabled; - } - - /** - * Get the gRPC TLS provider. - * @return the gRPC TLS Provider. - */ - public SslProvider getGrpcSslProvider() { - return SslProvider.valueOf(configuration.get(HDDS_GRPC_TLS_PROVIDER, - HDDS_GRPC_TLS_PROVIDER_DEFAULT)); - } - - /** - * Return true if using test certificates with authority as localhost. - * This should be used only for unit test where certificates are generated - * by openssl with localhost as DN and should never use for production as it - * will bypass the hostname/ip matching verification. - * @return true if using test certificates. - */ - public boolean useTestCert() { - return grpcTlsUseTestCert; - } - - /** - * Adds a security provider dynamically if it is not loaded already. - * - * @param providerName - name of the provider. - */ - private Provider initSecurityProvider(String providerName) { - switch (providerName) { - case "BC": - Security.addProvider(new BouncyCastleProvider()); - return Security.getProvider(providerName); - default: - LOG.error("Security Provider:{} is unknown", provider); - throw new SecurityException("Unknown security provider:" + provider); - } - } - - /** - * Returns max date for which S3 tokens will be valid. - */ - public long getS3TokenMaxDate() { - return getConfiguration().getTimeDuration( - OzoneConfigKeys.OZONE_S3_TOKEN_MAX_LIFETIME_KEY, - OzoneConfigKeys.OZONE_S3_TOKEN_MAX_LIFETIME_KEY_DEFAULT, - TimeUnit.MICROSECONDS); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java deleted file mode 100644 index 12ececd8d4b3a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
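The certificate lifetimes read by SecurityConfig above are plain ISO-8601 duration strings handled by java.time.Duration; a self-contained illustration (P1865D is the default maximum quoted in the javadoc, P365D below is an arbitrary example value):

    import java.time.Duration;

    public class CertDurationExample {
      public static void main(String[] args) {
        Duration maxDuration = Duration.parse("P1865D");   // hdds.x509.max.duration default
        Duration certDuration = Duration.parse("P365D");   // example per-certificate lifetime
        System.out.println(maxDuration.toDays());                       // 1865
        System.out.println(certDuration.compareTo(maxDuration) <= 0);   // true: within the CA maximum
      }
    }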
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.bouncycastle.asn1.ASN1Encodable; -import org.bouncycastle.asn1.ASN1ObjectIdentifier; -import org.bouncycastle.asn1.pkcs.Attribute; -import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; -import org.bouncycastle.asn1.x500.RDN; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.Extensions; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.operator.ContentVerifierProvider; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentVerifierProviderBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.bouncycastle.pkcs.PKCSException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; - -/** - * A base approver class for certificate approvals. - */ -public abstract class BaseApprover implements CertificateApprover { - private static final Logger LOG = - LoggerFactory.getLogger(CertificateApprover.class); - private final PKIProfile profile; - private final SecurityConfig securityConfig; - - public BaseApprover(PKIProfile pkiProfile, SecurityConfig config) { - this.profile = Objects.requireNonNull(pkiProfile); - this.securityConfig = Objects.requireNonNull(config); - } - - /** - * Returns the Security config. - * - * @return SecurityConfig - */ - public SecurityConfig getSecurityConfig() { - return securityConfig; - } - - /** - * Returns the Attribute array that encodes extensions. - * - * @param request - Certificate Request - * @return - An Array of Attributes that encode various extensions requested - * in this certificate. - */ - Attribute[] getAttributes(PKCS10CertificationRequest request) { - Objects.requireNonNull(request); - return - request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest); - } - - /** - * Returns a list of Extensions encoded in a given attribute. - * - * @param attribute - Attribute to decode. - * @return - List of Extensions. - */ - List getExtensionsList(Attribute attribute) { - Objects.requireNonNull(attribute); - List extensionsList = new ArrayList<>(); - for (ASN1Encodable value : attribute.getAttributeValues()) { - if(value != null) { - Extensions extensions = Extensions.getInstance(value); - extensionsList.add(extensions); - } - } - return extensionsList; - } - - /** - * Returns the Extension decoded into a Java Collection. - * @param extensions - A set of Extensions in ASN.1. - * @return List of Decoded Extensions. - */ - List getIndividualExtension(Extensions extensions) { - Objects.requireNonNull(extensions); - List extenList = new ArrayList<>(); - for (ASN1ObjectIdentifier id : extensions.getExtensionOIDs()) { - if (id != null) { - Extension ext = extensions.getExtension(id); - if (ext != null) { - extenList.add(ext); - } - } - } - return extenList; - } - - - - /** - * This function verifies all extensions in the certificate. - * - * @param request - CSR - * @return - true if the extensions are acceptable by the profile, false - * otherwise. 
- */ - boolean verfiyExtensions(PKCS10CertificationRequest request) { - Objects.requireNonNull(request); - /* - * Inside a CSR we have - * 1. A list of Attributes - * 2. Inside each attribute a list of extensions. - * 3. We need to walk thru the each extension and verify they - * are expected and we can put that into a certificate. - */ - - for (Attribute attr : getAttributes(request)) { - for (Extensions extensionsList : getExtensionsList(attr)) { - for (Extension extension : getIndividualExtension(extensionsList)) { - if (!profile.validateExtension(extension)) { - LOG.error("Failed to verify extension. {}", - extension.getExtnId().getId()); - return false; - } - } - } - } - return true; - } - - /** - * Verifies the Signature on the CSR is valid. - * - * @param pkcs10Request - PCKS10 Request. - * @return True if it is valid, false otherwise. - * @throws OperatorCreationException - On Error. - * @throws PKCSException - on Error. - */ - boolean verifyPkcs10Request(PKCS10CertificationRequest pkcs10Request) - throws OperatorCreationException, PKCSException { - ContentVerifierProvider verifierProvider = new - JcaContentVerifierProviderBuilder() - .setProvider(this.securityConfig.getProvider()) - .build(pkcs10Request.getSubjectPublicKeyInfo()); - return - pkcs10Request.isSignatureValid(verifierProvider); - } - - /** - * {@inheritDoc} - */ - @Override - public CompletableFuture inspectCSR(String csr) - throws IOException { - return inspectCSR(CertificateSignRequest.getCertificationRequest(csr)); - } - - /** - * {@inheritDoc} - */ - @Override - public CompletableFuture - inspectCSR(PKCS10CertificationRequest csr) { - /** - * The base approver executes the following algorithm to verify that a - * CSR meets the PKI Profile criteria. - * - * 0. For time being (Until we have SCM HA) we will deny all request to - * become an intermediary CA. So we will not need to verify using CA - * profile, right now. - * - * 1. We verify the proof of possession. That is we verify the entity - * that sends us the CSR indeed has the private key for the said public key. - * - * 2. Then we will verify the RDNs meet the format and the Syntax that - * PKI profile dictates. - * - * 3. Then we decode each and every extension and ask if the PKI profile - * approves of these extension requests. - * - * 4. If all of these pass, We will return a Future which will point to - * the Certificate when finished. - */ - - CompletableFuture response = - new CompletableFuture<>(); - try { - // Step 0: Verify this is not a CA Certificate. - // Will be done by the Ozone PKI profile for time being. - // If there are any basicConstraints, they will flagged as not - // supported for time being. - - // Step 1: Let us verify that Certificate is indeed signed by someone - // who has access to the private key. - if (!verifyPkcs10Request(csr)) { - LOG.error("Failed to verify the signature in CSR."); - response.completeExceptionally(new SCMSecurityException("Failed to " + - "verify the CSR.")); - } - - // Step 2: Verify the RDNs are in the correct format. - // TODO: Ozone Profile does not verify RDN now, so this call will pass. - for (RDN rdn : csr.getSubject().getRDNs()) { - if (!profile.validateRDN(rdn)) { - LOG.error("Failed in verifying RDNs"); - response.completeExceptionally(new SCMSecurityException("Failed to " + - "verify the RDNs. Please check the subject name.")); - } - } - - // Step 3: Verify the Extensions. 
- if (!verfiyExtensions(csr)) { - LOG.error("failed in verification of extensions."); - response.completeExceptionally(new SCMSecurityException("Failed to " + - "verify extensions.")); - } - - } catch (OperatorCreationException | PKCSException e) { - LOG.error("Approval Failure.", e); - response.completeExceptionally(new SCMSecurityException(e)); - } - return response; - } - - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateApprover.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateApprover.java deleted file mode 100644 index 31d0aeaddc56c..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateApprover.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; - -import java.io.IOException; -import java.security.PrivateKey; -import java.util.Date; -import java.util.concurrent.CompletableFuture; - -/** - * Certificate Approver interface is used to inspectCSR a certificate. - */ -public interface CertificateApprover { - /** - * Approves a Certificate Request based on the policies of this approver. - * - * @param csr - Certificate Signing Request. - * @return - Future that will be contain the certificate or exception. - */ - CompletableFuture - inspectCSR(PKCS10CertificationRequest csr); - - /** - * Approves a Certificate Request based on the policies of this approver. - * - * @param csr - Certificate Signing Request. - * @return - Future that will be contain the certificate or exception. - * @throws IOException - On Error. - */ - CompletableFuture - inspectCSR(String csr) throws IOException; - - /** - * Sign function signs a Certificate. - * @param config - Security Config. - * @param caPrivate - CAs private Key. - * @param caCertificate - CA Certificate. - * @param validFrom - Begin Date - * @param validTill - End Date - * @param certificationRequest - Certification Request. - * @param scmId - SCM id. - * @param clusterId - Cluster id. - * @return Signed Certificate. - * @throws IOException - On Error - * @throws OperatorCreationException - on Error. 
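For context on what the approver above is verifying: the client signs its CSR with the private key matching the embedded public key (the proof of possession checked by verifyPkcs10Request), and any requested extensions travel in the pkcs_9_at_extensionRequest attribute that getAttributes()/getExtensionsList() unpack. A generic Bouncy Castle sketch of producing such a CSR, with made-up subject values that mirror the CN=host, OU=scmId, O=clusterId convention used by the signer:

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
    import org.bouncycastle.asn1.x500.X500Name;
    import org.bouncycastle.asn1.x509.Extension;
    import org.bouncycastle.asn1.x509.ExtensionsGenerator;
    import org.bouncycastle.asn1.x509.KeyUsage;
    import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
    import org.bouncycastle.pkcs.PKCS10CertificationRequest;
    import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder;

    // Key pair; the CSR is signed with its private key, which is what the
    // CA-side signature check verifies.
    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
    kpg.initialize(2048);
    KeyPair pair = kpg.generateKeyPair();

    // Requested extensions are wrapped in the extensionRequest attribute.
    ExtensionsGenerator extensions = new ExtensionsGenerator();
    extensions.addExtension(Extension.keyUsage, true,
        new KeyUsage(KeyUsage.digitalSignature | KeyUsage.keyEncipherment));

    X500Name subject = new X500Name("CN=dn1.example.com,OU=scm-1,O=cluster-1");
    PKCS10CertificationRequest csr =
        new JcaPKCS10CertificationRequestBuilder(subject, pair.getPublic())
            .addAttribute(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest,
                extensions.generate())
            .build(new JcaContentSignerBuilder("SHA256withRSA")
                .build(pair.getPrivate()));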
- */ - @SuppressWarnings("ParameterNumber") - X509CertificateHolder sign( - SecurityConfig config, - PrivateKey caPrivate, - X509CertificateHolder caCertificate, - Date validFrom, - Date validTill, - PKCS10CertificationRequest certificationRequest, - String scmId, - String clusterId) - throws IOException, OperatorCreationException; - - - /** - * Approval Types for a certificate request. - */ - enum ApprovalType { - KERBEROS_TRUSTED, /* The Request came from a DN using Kerberos Identity*/ - MANUAL, /* Wait for a Human being to inspect CSR of this certificate */ - TESTING_AUTOMATIC /* For testing purpose, Automatic Approval. */ - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java deleted file mode 100644 index b1d7d6b08440e..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateApprover.ApprovalType; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; - -import java.io.IOException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.concurrent.Future; - -/** - * Interface for Certificate Authority. This can be extended to talk to - * external CAs later or HSMs later. - */ -public interface CertificateServer { - /** - * Initialize the Certificate Authority. - * - * @param securityConfig - Security Configuration. - * @param type - The Type of CertificateServer we are creating, we make this - * explicit so that when we read code it is visible to the users. - * @throws SCMSecurityException - Throws if the init fails. - */ - void init(SecurityConfig securityConfig, CAType type) - throws SCMSecurityException; - - /** - * Returns the CA Certificate for this CA. - * - * @return X509CertificateHolder - Certificate for this CA. - * @throws CertificateException - usually thrown if this CA is not - * initialized. - * @throws IOException - on Error. - */ - X509CertificateHolder getCACertificate() - throws CertificateException, IOException; - - /** - * Returns the Certificate corresponding to given certificate serial id if - * exist. Return null if it doesn't exist. 
- * - * @return certSerialId - Certificate serial id. - * @throws CertificateException - usually thrown if this CA is not - * initialized. - * @throws IOException - on Error. - */ - X509Certificate getCertificate(String certSerialId) - throws CertificateException, IOException; - - /** - * Request a Certificate based on Certificate Signing Request. - * - * @param csr - Certificate Signing Request. - * @param type - An Enum which says what kind of approval process to follow. - * @return A future that will have this certificate when this request is - * approved. - * @throws SCMSecurityException - on Error. - */ - Future requestCertificate( - PKCS10CertificationRequest csr, - CertificateApprover.ApprovalType type) - throws SCMSecurityException; - - - /** - * Request a Certificate based on Certificate Signing Request. - * - * @param csr - Certificate Signing Request as a PEM encoded String. - * @param type - An Enum which says what kind of approval process to follow. - * @return A future that will have this certificate when this request is - * approved. - * @throws SCMSecurityException - on Error. - */ - Future requestCertificate(String csr, - ApprovalType type) throws IOException; - - /** - * Revokes a Certificate issued by this CertificateServer. - * - * @param certificate - Certificate to revoke - * @param approver - Approval process to follow. - * @return Future that tells us what happened. - * @throws SCMSecurityException - on Error. - */ - Future revokeCertificate(X509Certificate certificate, - ApprovalType approver) throws SCMSecurityException; - - /** - * TODO : CRL, OCSP etc. Later. This is the start of a CertificateServer - * framework. - */ - - - /** - * Make it explicit what type of CertificateServer we are creating here. - */ - enum CAType { - SELF_SIGNED_CA, - INTERMEDIARY_CA - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java deleted file mode 100644 index 961d048c51c6f..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.cert.X509Certificate; - -/** - * This interface allows the DefaultCA to be portable and use different DB - * interfaces later. It also allows us define this interface in the SCM layer - * by which we don't have to take a circular dependency between hdds-common - * and the SCM. 
- * - * With this interface, DefaultCA server read and write DB or persistence - * layer and we can write to SCM's Metadata DB. - */ -public interface CertificateStore { - - /** - * Writes a new certificate that was issued to the persistent store. - * @param serialID - Certificate Serial Number. - * @param certificate - Certificate to persist. - * @throws IOException - on Failure. - */ - void storeValidCertificate(BigInteger serialID, - X509Certificate certificate) throws IOException; - - /** - * Moves a certificate in a transactional manner from valid certificate to - * revoked certificate state. - * @param serialID - Serial ID of the certificate. - * @throws IOException - */ - void revokeCertificate(BigInteger serialID) throws IOException; - - /** - * Deletes an expired certificate from the store. Please note: We don't - * remove revoked certificates, we need that information to generate the - * CRLs. - * @param serialID - Certificate ID. - */ - void removeExpiredCertificate(BigInteger serialID) throws IOException; - - /** - * Retrieves a Certificate based on the Serial number of that certificate. - * @param serialID - ID of the certificate. - * @param certType - * @return X509Certificate - * @throws IOException - */ - X509Certificate getCertificateByID(BigInteger serialID, CertType certType) - throws IOException; - - /** - * Different kind of Certificate stores. - */ - enum CertType { - VALID_CERTS, - REVOKED_CERTS - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java deleted file mode 100644 index c7f37c18063dd..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
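To make the CertificateStore contract above concrete, a toy in-memory implementation such as a unit test might use could look like the sketch below; this is illustrative only and not how SCM actually persists certificates:

    import java.math.BigInteger;
    import java.security.cert.X509Certificate;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class InMemoryCertificateStore implements CertificateStore {
      private final Map<BigInteger, X509Certificate> valid = new ConcurrentHashMap<>();
      private final Map<BigInteger, X509Certificate> revoked = new ConcurrentHashMap<>();

      @Override
      public void storeValidCertificate(BigInteger serialID, X509Certificate cert) {
        valid.put(serialID, cert);
      }

      @Override
      public void revokeCertificate(BigInteger serialID) {
        X509Certificate cert = valid.remove(serialID);
        if (cert != null) {
          revoked.put(serialID, cert);   // kept around for CRL generation
        }
      }

      @Override
      public void removeExpiredCertificate(BigInteger serialID) {
        valid.remove(serialID);          // revoked entries are intentionally retained
      }

      @Override
      public X509Certificate getCertificateByID(BigInteger serialID, CertType type) {
        return type == CertType.VALID_CERTS ? valid.get(serialID) : revoked.get(serialID);
      }
    }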
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile; -import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; -import org.apache.hadoop.util.Time; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x500.style.BCStyle; -import org.bouncycastle.asn1.x509.AlgorithmIdentifier; -import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.X509v3CertificateBuilder; -import org.bouncycastle.crypto.params.AsymmetricKeyParameter; -import org.bouncycastle.crypto.params.RSAKeyParameters; -import org.bouncycastle.crypto.util.PrivateKeyFactory; -import org.bouncycastle.crypto.util.PublicKeyFactory; -import org.bouncycastle.operator.ContentSigner; -import org.bouncycastle.operator.DefaultDigestAlgorithmIdentifierFinder; -import org.bouncycastle.operator.DefaultSignatureAlgorithmIdentifierFinder; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.bc.BcRSAContentSignerBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.PrivateKey; -import java.util.Date; -import java.util.concurrent.CompletableFuture; - -/** - * Default Approver used the by the DefaultCA. - */ -public class DefaultApprover extends BaseApprover { - - /** - * Constructs the Default Approver. - * - * @param pkiProfile - PKI Profile to use. - * @param config - Security Config - */ - public DefaultApprover(PKIProfile pkiProfile, SecurityConfig config) { - super(pkiProfile, config); - } - - /** - * Sign function signs a Certificate. - * @param config - Security Config. - * @param caPrivate - CAs private Key. - * @param caCertificate - CA Certificate. - * @param validFrom - Begin Da te - * @param validTill - End Date - * @param certificationRequest - Certification Request. - * @param scmId - SCM id. - * @param clusterId - Cluster id. - * @return Signed Certificate. - * @throws IOException - On Error - * @throws OperatorCreationException - on Error. - */ - @SuppressWarnings("ParameterNumber") - public X509CertificateHolder sign( - SecurityConfig config, - PrivateKey caPrivate, - X509CertificateHolder caCertificate, - Date validFrom, - Date validTill, - PKCS10CertificationRequest certificationRequest, - String scmId, - String clusterId) throws IOException, OperatorCreationException { - - AlgorithmIdentifier sigAlgId = new - DefaultSignatureAlgorithmIdentifierFinder().find( - config.getSignatureAlgo()); - AlgorithmIdentifier digAlgId = new DefaultDigestAlgorithmIdentifierFinder() - .find(sigAlgId); - - AsymmetricKeyParameter asymmetricKP = PrivateKeyFactory.createKey(caPrivate - .getEncoded()); - SubjectPublicKeyInfo keyInfo = - certificationRequest.getSubjectPublicKeyInfo(); - - // Get scmId and cluster Id from subject name. - X500Name x500Name = certificationRequest.getSubject(); - String csrScmId = x500Name.getRDNs(BCStyle.OU)[0].getFirst().getValue(). - toASN1Primitive().toString(); - String csrClusterId = x500Name.getRDNs(BCStyle.O)[0].getFirst().getValue(). 
- toASN1Primitive().toString(); - - if (!scmId.equals(csrScmId) || !clusterId.equals(csrClusterId)) { - if (csrScmId.equalsIgnoreCase("null") && - csrClusterId.equalsIgnoreCase("null")) { - // Special case to handle DN certificate generation as DN might not know - // scmId and clusterId before registration. In secure mode registration - // will succeed only after datanode has a valid certificate. - String cn = x500Name.getRDNs(BCStyle.CN)[0].getFirst().getValue() - .toASN1Primitive().toString(); - x500Name = SecurityUtil.getDistinguishedName(cn, scmId, clusterId); - } else { - // Throw exception if scmId and clusterId doesn't match. - throw new SCMSecurityException("ScmId and ClusterId in CSR subject" + - " are incorrect."); - } - } - - RSAKeyParameters rsa = - (RSAKeyParameters) PublicKeyFactory.createKey(keyInfo); - if (rsa.getModulus().bitLength() < config.getSize()) { - throw new SCMSecurityException("Key size is too small in certificate " + - "signing request"); - } - X509v3CertificateBuilder certificateGenerator = - new X509v3CertificateBuilder( - caCertificate.getSubject(), - // Serial is not sequential but it is monotonically increasing. - BigInteger.valueOf(Time.monotonicNowNanos()), - validFrom, - validTill, - x500Name, keyInfo); - - ContentSigner sigGen = new BcRSAContentSignerBuilder(sigAlgId, digAlgId) - .build(asymmetricKP); - - return certificateGenerator.build(sigGen); - - } - - @Override - public CompletableFuture inspectCSR(String csr) - throws IOException { - return super.inspectCSR(csr); - } - - @Override - public CompletableFuture - inspectCSR(PKCS10CertificationRequest csr) { - return super.inspectCSR(csr); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java deleted file mode 100644 index a5147b34e2f54..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java +++ /dev/null @@ -1,491 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
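As a sanity check on the output of the sign() method above, the returned X509CertificateHolder can be converted to a JCA certificate and verified against the CA key; signedHolder and caCert below are placeholder names:

    import java.security.cert.X509Certificate;
    import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;

    X509Certificate issued = new JcaX509CertificateConverter()
        .setProvider("BC")
        .getCertificate(signedHolder);     // signedHolder: the result of sign()
    issued.verify(caCert.getPublicKey());  // throws if not signed by the CA key
    issued.checkValidity();                // throws if outside validFrom..validTill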
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.math.BigInteger; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.security.spec.InvalidKeySpecException; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.LocalTime; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.Future; -import java.util.function.Consumer; - -import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.*; - -/** - * The default CertificateServer used by SCM. This has no dependencies on any - * external system, this allows us to bootstrap a CertificateServer from - * Scratch. - *
- * Details ======= - *
- * The Default CA server is one of the many possible implementations of an SCM - * Certificate Authority. - *
- * A certificate authority needs the Root Certificates and its private key to - * operate. The init function of the DefaultCA Server detects four possible - * states the System can be in. - *
- * 1. Success - This means that the expected Certificates and Keys are in - * place, and the CA was able to read those files into memory. - *
- * 2. Missing Keys - This means that private keys are missing. This is an error - * state which SCM CA cannot recover from. The cluster might have been - * initialized earlier and for some reason, we are not able to find the private - * keys for the CA. Eventually we will have 2 ways to recover from this state, - * first one is to copy the SCM CA private keys from a backup. Second one is to - * rekey the whole cluster. Both of these are improvements we will support in - * future. - *
- * 3. Missing Certificate - Similar to Missing Keys, but the root certificates - * are missing. - *
- * 4. Initialize - We don't have keys or certificates. DefaultCA assumes that - * this is a system bootup and will generate the keys and certificates - * automatically. - *
- * The init() follows the following logic, - *
- * 1. Compute the Verification Status -- Success, Missing Keys, Missing Certs or - * Initialize. - *
- * 2. ProcessVerificationStatus - Returns a Lambda, based on the Verification - * Status. - *
- * 3. Invoke the Lambda function. - *
- * At the end of the init function, we have functional CA. This function can be - * invoked as many times since we will regenerate the keys and certs only if - * both of them are missing. - */ -public class DefaultCAServer implements CertificateServer { - private static final Logger LOG = - LoggerFactory.getLogger(DefaultCAServer.class); - private final String subject; - private final String clusterID; - private final String scmID; - private String componentName = Paths.get("scm", "ca").toString(); - private Path caKeysPath; - private Path caRootX509Path; - private SecurityConfig config; - /** - * TODO: We will make these configurable in the future. - */ - private PKIProfile profile; - private CertificateApprover approver; - private CertificateStore store; - - /** - * Create an Instance of DefaultCAServer. - * @param subject - String Subject - * @param clusterID - String ClusterID - * @param scmID - String SCMID. - * @param certificateStore - A store used to persist Certificates. - */ - public DefaultCAServer(String subject, String clusterID, String scmID, - CertificateStore certificateStore) { - this.subject = subject; - this.clusterID = clusterID; - this.scmID = scmID; - this.store = certificateStore; - } - - @Override - public void init(SecurityConfig securityConfig, CAType type) - throws SCMSecurityException { - caKeysPath = securityConfig.getKeyLocation(componentName); - caRootX509Path = securityConfig.getCertificateLocation(componentName); - this.config = securityConfig; - - // TODO: Make these configurable and load different profiles based on - // config. - profile = new DefaultProfile(); - this.approver = new DefaultApprover(profile, this.config); - - /* In future we will spilt this code to have different kind of CAs. - * Right now, we have only self-signed CertificateServer. - */ - - if (type == CAType.SELF_SIGNED_CA) { - VerificationStatus status = verifySelfSignedCA(securityConfig); - Consumer caInitializer = - processVerificationStatus(status); - caInitializer.accept(securityConfig); - return; - } - - LOG.error("We support only Self-Signed CAs for now."); - throw new IllegalStateException("Not implemented functionality requested."); - } - - @Override - public X509CertificateHolder getCACertificate() throws IOException { - CertificateCodec certificateCodec = - new CertificateCodec(config, componentName); - try { - return certificateCodec.readCertificate(); - } catch (CertificateException e) { - throw new IOException(e); - } - } - - /** - * Returns the Certificate corresponding to given certificate serial id if - * exist. Return null if it doesn't exist. - * - * @param certSerialId - Certificate for this CA. - * @return X509CertificateHolder - * @throws CertificateException - usually thrown if this CA is not - * initialized. - * @throws IOException - on Error. 
- */ - @Override - public X509Certificate getCertificate(String certSerialId) throws - IOException { - return store.getCertificateByID(new BigInteger(certSerialId), - CertificateStore.CertType.VALID_CERTS); - } - - private KeyPair getCAKeys() throws IOException { - KeyCodec keyCodec = new KeyCodec(config, componentName); - try { - return new KeyPair(keyCodec.readPublicKey(), keyCodec.readPrivateKey()); - } catch (InvalidKeySpecException | NoSuchAlgorithmException e) { - throw new IOException(e); - } - } - - @Override - public Future requestCertificate( - PKCS10CertificationRequest csr, - CertificateApprover.ApprovalType approverType) { - LocalDate beginDate = LocalDate.now().atStartOfDay().toLocalDate(); - LocalDateTime temp = LocalDateTime.of(beginDate, LocalTime.MIDNIGHT); - LocalDate endDate = - temp.plus(config.getDefaultCertDuration()).toLocalDate(); - - CompletableFuture xcertHolder = - approver.inspectCSR(csr); - - if(xcertHolder.isCompletedExceptionally()) { - // This means that approver told us there are things which it disagrees - // with in this Certificate Request. Since the first set of sanity - // checks failed, we just return the future object right here. - return xcertHolder; - } - try { - switch (approverType) { - case MANUAL: - xcertHolder.completeExceptionally(new SCMSecurityException("Manual " + - "approval is not yet implemented.")); - break; - case KERBEROS_TRUSTED: - case TESTING_AUTOMATIC: - X509CertificateHolder xcert; - try { - xcert = signAndStoreCertificate(beginDate, endDate, csr); - } catch (SCMSecurityException e) { - // Certificate with conflicting serial id, retry again may resolve - // this issue. - LOG.error("Certificate storage failed, retrying one more time.", e); - xcert = signAndStoreCertificate(beginDate, endDate, csr); - } - - xcertHolder.complete(xcert); - break; - default: - return null; // cannot happen, keeping checkstyle happy. - } - } catch (CertificateException | IOException | OperatorCreationException e) { - LOG.error("Unable to issue a certificate. {}", e); - xcertHolder.completeExceptionally(new SCMSecurityException(e)); - } - return xcertHolder; - } - - private X509CertificateHolder signAndStoreCertificate(LocalDate beginDate, - LocalDate endDate, PKCS10CertificationRequest csr) throws IOException, - OperatorCreationException, CertificateException { - X509CertificateHolder xcert = approver.sign(config, - getCAKeys().getPrivate(), - getCACertificate(), java.sql.Date.valueOf(beginDate), - java.sql.Date.valueOf(endDate), csr, scmID, clusterID); - store.storeValidCertificate(xcert.getSerialNumber(), - CertificateCodec.getX509Certificate(xcert)); - return xcert; - } - - @Override - public Future requestCertificate(String csr, - CertificateApprover.ApprovalType type) throws IOException { - PKCS10CertificationRequest request = - getCertificationRequest(csr); - return requestCertificate(request, type); - } - - @Override - public Future revokeCertificate(X509Certificate certificate, - CertificateApprover.ApprovalType approverType) - throws SCMSecurityException { - CompletableFuture revoked = new CompletableFuture<>(); - if (certificate == null) { - revoked.completeExceptionally(new SCMSecurityException( - "Certificate cannot be null")); - return revoked; - } - try { - store.revokeCertificate(certificate.getSerialNumber()); - } catch (IOException ex) { - LOG.error("Revoking the certificate failed. {}", ex.getCause()); - throw new SCMSecurityException(ex); - } - return revoked; - } - - /** - * Generates a Self Signed CertificateServer. 
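Putting the deleted pieces together, a caller inside SCM would drive this server roughly as follows; certificateStore, conf and pemEncodedCsr are placeholders for objects the caller already has:

    CertificateServer ca =
        new DefaultCAServer("testRootCA", "cluster-1", "scm-1", certificateStore);
    ca.init(new SecurityConfig(conf), CertificateServer.CAType.SELF_SIGNED_CA);

    Future<X509CertificateHolder> pending =
        ca.requestCertificate(pemEncodedCsr,
            CertificateApprover.ApprovalType.TESTING_AUTOMATIC);
    X509CertificateHolder signed = pending.get();   // completes once approved and signed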
These are the steps in - * generating a Self-Signed CertificateServer. - *
- * 1. Generate a Private/Public Key Pair. 2. Persist to a protected location. - * 3. Generate a SelfSigned Root CertificateServer certificate. - * - * @param securityConfig - Config. - */ - private void generateSelfSignedCA(SecurityConfig securityConfig) throws - NoSuchAlgorithmException, NoSuchProviderException, IOException { - KeyPair keyPair = generateKeys(securityConfig); - generateRootCertificate(securityConfig, keyPair); - } - - /** - * Verify Self-Signed CertificateServer. 1. Check if the Certificate exist. 2. - * Check if the key pair exists. - * - * @param securityConfig -- Config - * @return Verification Status - */ - private VerificationStatus verifySelfSignedCA(SecurityConfig securityConfig) { - /* - The following is the truth table for the States. - True means we have that file False means it is missing. - +--------------+--------+--------+--------------+ - | Certificates | Keys | Result | Function | - +--------------+--------+--------+--------------+ - | True | True | True | Success | - | False | False | True | Initialize | - | True | False | False | Missing Key | - | False | True | False | Missing Cert | - +--------------+--------+--------+--------------+ - - This truth table maps to ~(certs xor keys) or certs == keys - */ - boolean keyStatus = checkIfKeysExist(); - boolean certStatus = checkIfCertificatesExist(); - - if ((certStatus == keyStatus) && (certStatus)) { - return VerificationStatus.SUCCESS; - } - - if ((certStatus == keyStatus) && (!certStatus)) { - return VerificationStatus.INITIALIZE; - } - - // At this point certStatus is not equal to keyStatus. - if (certStatus) { - return VerificationStatus.MISSING_KEYS; - } - - return VerificationStatus.MISSING_CERTIFICATE; - } - - /** - * Returns Keys status. - * - * @return True if the key files exist. - */ - private boolean checkIfKeysExist() { - if (!Files.exists(caKeysPath)) { - return false; - } - - return Files.exists(Paths.get(caKeysPath.toString(), - this.config.getPrivateKeyFileName())); - } - - /** - * Returns certificate Status. - * - * @return True if the Certificate files exist. - */ - private boolean checkIfCertificatesExist() { - if (!Files.exists(caRootX509Path)) { - return false; - } - return Files.exists(Paths.get(caRootX509Path.toString(), - this.config.getCertificateFileName())); - } - - /** - * Based on the Status of the verification, we return a lambda that gets - * executed by the init function of the CA. - * - * @param status - Verification Status. - */ - @VisibleForTesting - Consumer processVerificationStatus( - VerificationStatus status) { - Consumer consumer = null; - switch (status) { - case SUCCESS: - consumer = (arg) -> LOG.info("CertificateServer validation is " + - "successful"); - break; - case MISSING_KEYS: - consumer = (arg) -> { - LOG.error("We have found the Certificate for this CertificateServer, " + - "but keys used by this CertificateServer is missing. This is a " + - "non-recoverable error. Please restart the system after locating " + - "the Keys used by the CertificateServer."); - LOG.error("Exiting due to unrecoverable CertificateServer error."); - throw new IllegalStateException("Missing Keys, cannot continue."); - }; - break; - case MISSING_CERTIFICATE: - consumer = (arg) -> { - LOG.error("We found the keys, but the root certificate for this " + - "CertificateServer is missing. 
Please restart SCM after locating " + - "the " + - "Certificates."); - LOG.error("Exiting due to unrecoverable CertificateServer error."); - throw new IllegalStateException("Missing Root Certs, cannot continue."); - }; - break; - case INITIALIZE: - consumer = (arg) -> { - try { - generateSelfSignedCA(arg); - } catch (NoSuchProviderException | NoSuchAlgorithmException - | IOException e) { - LOG.error("Unable to initialize CertificateServer.", e); - } - VerificationStatus newStatus = verifySelfSignedCA(arg); - if (newStatus != VerificationStatus.SUCCESS) { - LOG.error("Unable to initialize CertificateServer, failed in " + - "verification."); - } - }; - break; - default: - /* Make CheckStyle happy */ - break; - } - return consumer; - } - - /** - * Generates a KeyPair for the Certificate. - * - * @param securityConfig - SecurityConfig. - * @return Key Pair. - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws IOException - on Error. - */ - private KeyPair generateKeys(SecurityConfig securityConfig) - throws NoSuchProviderException, NoSuchAlgorithmException, IOException { - HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig); - KeyPair keys = keyGenerator.generateKey(); - KeyCodec keyPEMWriter = new KeyCodec(securityConfig, - componentName); - keyPEMWriter.writeKey(keys); - return keys; - } - - /** - * Generates a self-signed Root Certificate for CA. - * - * @param securityConfig - SecurityConfig - * @param key - KeyPair. - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - */ - private void generateRootCertificate(SecurityConfig securityConfig, - KeyPair key) throws IOException, SCMSecurityException { - Preconditions.checkNotNull(this.config); - LocalDate beginDate = LocalDate.now().atStartOfDay().toLocalDate(); - LocalDateTime temp = LocalDateTime.of(beginDate, LocalTime.MIDNIGHT); - LocalDate endDate = - temp.plus(securityConfig.getMaxCertificateDuration()).toLocalDate(); - X509CertificateHolder selfSignedCertificate = - SelfSignedCertificate - .newBuilder() - .setSubject(this.subject) - .setScmID(this.scmID) - .setClusterID(this.clusterID) - .setBeginDate(beginDate) - .setEndDate(endDate) - .makeCA() - .setConfiguration(securityConfig.getConfiguration()) - .setKey(key) - .build(); - - CertificateCodec certCodec = - new CertificateCodec(config, componentName); - certCodec.writeCertificate(selfSignedCertificate); - } - - /** - * This represents the verification status of the CA. Based on this enum - * appropriate action is taken in the Init. - */ - @VisibleForTesting - enum VerificationStatus { - SUCCESS, /* All artifacts needed by CertificateServer is present */ - MISSING_KEYS, /* Private key is missing, certificate Exists.*/ - MISSING_CERTIFICATE, /* Keys exist, but root certificate missing.*/ - INITIALIZE /* All artifacts are missing, we should init the system. */ - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java deleted file mode 100644 index 53eb98fbdc2ad..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles; - -import org.bouncycastle.asn1.x509.Extension; - -import java.util.function.BiFunction; - -import static java.lang.Boolean.TRUE; - -/** - * CA Profile, this is needed when SCM does HA. - * A place holder class indicating what we need to do when we support issuing - * CA certificates to other SCMs in HA mode. - */ -public class DefaultCAProfile extends DefaultProfile { - static final BiFunction - VALIDATE_BASIC_CONSTRAINTS = (e, b) -> TRUE; - static final BiFunction - VALIDATE_CRL_NUMBER = (e, b) -> TRUE; - static final BiFunction - VALIDATE_REASON_CODE = (e, b) -> TRUE; - static final BiFunction - VALIDATE_DELTA_CRL_INDICATOR = (e, b) -> TRUE; - static final BiFunction - VALIDATE_NAME_CONSTRAINTS = (e, b) -> TRUE; - static final BiFunction - VALIDATE_CRL_DISTRIBUTION_POINTS = (e, b) -> TRUE; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java deleted file mode 100644 index 5fdb6f7d96696..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java +++ /dev/null @@ -1,336 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles; - -import com.google.common.base.Preconditions; -import org.apache.commons.codec.DecoderException; -import org.apache.commons.codec.binary.Hex; -import org.apache.commons.validator.routines.DomainValidator; -import org.bouncycastle.asn1.ASN1ObjectIdentifier; -import org.bouncycastle.asn1.x500.RDN; -import org.bouncycastle.asn1.x509.ExtendedKeyUsage; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.GeneralName; -import org.bouncycastle.asn1.x509.GeneralNames; -import org.bouncycastle.asn1.x509.KeyPurposeId; -import org.bouncycastle.asn1.x509.KeyUsage; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.AbstractMap.SimpleEntry; -import java.util.Arrays; -import java.util.BitSet; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.function.BiFunction; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.lang.Boolean.TRUE; -import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_clientAuth; -import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_serverAuth; - -/** - * Ozone PKI profile. - *
- * This PKI profile is invoked by SCM CA to make sure that certificates issued - * by SCM CA are constrained - */ -public class DefaultProfile implements PKIProfile { - static final BiFunction - VALIDATE_KEY_USAGE = DefaultProfile::validateKeyUsage; - static final BiFunction - VALIDATE_AUTHORITY_KEY_IDENTIFIER = (e, b) -> TRUE; - static final BiFunction - VALIDATE_LOGO_TYPE = (e, b) -> TRUE; - private static final Logger LOG = - LoggerFactory.getLogger(DefaultProfile.class); - static final BiFunction - VALIDATE_SAN = DefaultProfile::validateSubjectAlternativeName; - static final BiFunction - VALIDATE_EXTENDED_KEY_USAGE = DefaultProfile::validateExtendedKeyUsage; - // If we decide to add more General Names, we should add those here and - // also update the logic in validateGeneralName function. - private static final int[] GENERAL_NAMES = { - GeneralName.dNSName, - GeneralName.iPAddress, - }; - // Map that handles all the Extensions lookup and validations. - private static final Map> EXTENSIONS_MAP = Stream.of( - new SimpleEntry<>(Extension.keyUsage, VALIDATE_KEY_USAGE), - new SimpleEntry<>(Extension.subjectAlternativeName, VALIDATE_SAN), - new SimpleEntry<>(Extension.authorityKeyIdentifier, - VALIDATE_AUTHORITY_KEY_IDENTIFIER), - new SimpleEntry<>(Extension.extendedKeyUsage, - VALIDATE_EXTENDED_KEY_USAGE), - // Ozone certs are issued only for the use of Ozone. - // However, some users will discover that this is a full scale CA - // and decide to mis-use these certs for other purposes. - // To discourage usage of these certs for other purposes, we can leave - // the Ozone Logo inside these certs. So if a browser is used to - // connect these logos will show up. - // https://www.ietf.org/rfc/rfc3709.txt - new SimpleEntry<>(Extension.logoType, VALIDATE_LOGO_TYPE)) - .collect(Collectors.toMap(SimpleEntry::getKey, - SimpleEntry::getValue)); - // If we decide to add more General Names, we should add those here and - // also update the logic in validateGeneralName function. - private static final KeyPurposeId[] EXTENDED_KEY_USAGE = { - id_kp_serverAuth, // TLS Web server authentication - id_kp_clientAuth, // TLS Web client authentication - - }; - private final Set extendKeyPurposeSet; - private Set generalNameSet; - - /** - * Construct DefaultProfile. - */ - public DefaultProfile() { - generalNameSet = new HashSet<>(); - for (int val : GENERAL_NAMES) { - generalNameSet.add(val); - } - extendKeyPurposeSet = - new HashSet<>(Arrays.asList(EXTENDED_KEY_USAGE)); - - } - - /** - * This function validates that the KeyUsage Bits are subset of the Bits - * permitted by the ozone profile. - * - * @param ext - KeyUsage Extension. - * @param profile - PKI Profile - In this case this profile. - * @return True, if the request key usage is a subset, false otherwise. - */ - private static Boolean validateKeyUsage(Extension ext, PKIProfile profile) { - KeyUsage keyUsage = profile.getKeyUsage(); - KeyUsage requestedUsage = KeyUsage.getInstance(ext.getParsedValue()); - BitSet profileBitSet = BitSet.valueOf(keyUsage.getBytes()); - BitSet requestBitSet = BitSet.valueOf(requestedUsage.getBytes()); - // Check if the requestBitSet is a subset of profileBitSet - // p & r == r should be equal if it is a subset. - profileBitSet.and(requestBitSet); - return profileBitSet.equals(requestBitSet); - } - - /** - * Validates the SubjectAlternative names in the Certificate. - * - * @param ext - Extension - SAN, which allows us to get the SAN names. - * @param profile - This profile. 
- * @return - True if the request contains only SANs, General names that we - * support. False otherwise. - */ - private static Boolean validateSubjectAlternativeName(Extension ext, - PKIProfile profile) { - if (ext.isCritical()) { - // SAN extensions should not be marked as critical under ozone profile. - LOG.error("SAN extension marked as critical in the Extension. {}", - GeneralNames.getInstance(ext.getParsedValue()).toString()); - return false; - } - GeneralNames generalNames = GeneralNames.getInstance(ext.getParsedValue()); - for (GeneralName name : generalNames.getNames()) { - try { - if (!profile.validateGeneralName(name.getTagNo(), - name.getName().toString())) { - return false; - } - } catch (UnknownHostException e) { - LOG.error("IP address validation failed." - + name.getName().toString(), e); - return false; - } - } - return true; - } - - /** - * This function validates that the KeyUsage Bits are subset of the Bits - * permitted by the ozone profile. - * - * @param ext - KeyUsage Extension. - * @param profile - PKI Profile - In this case this profile. - * @return True, if the request key usage is a subset, false otherwise. - */ - private static Boolean validateExtendedKeyUsage(Extension ext, - PKIProfile profile) { - if (ext.isCritical()) { - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - // Ozone profile opts to mark this extension as non-critical. - LOG.error("Extended Key usage marked as critical."); - return false; - } - ExtendedKeyUsage extendedKeyUsage = - ExtendedKeyUsage.getInstance(ext.getParsedValue()); - for (KeyPurposeId id : extendedKeyUsage.getUsages()) { - if (!profile.validateExtendedKeyUsage(id)) { - return false; - } - } - return true; - } - - /** - * {@inheritDoc} - */ - @Override - public int[] getGeneralNames() { - return Arrays.copyOfRange(GENERAL_NAMES, 0, GENERAL_NAMES.length); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isSupportedGeneralName(int generalName) { - return generalNameSet.contains(generalName); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean validateGeneralName(int type, String value) { - // TODO : We should add more validation for IP address, for example - // it matches the local network, and domain matches where the cluster - // exits. - if (!isSupportedGeneralName(type)) { - return false; - } - switch (type) { - case GeneralName.iPAddress: - - // We need DatatypeConverter conversion, since the original CSR encodes - // an IP address int a Hex String, for example 8.8.8.8 is encoded as - // #08080808. Value string is always preceded by "#", we will strip - // that before passing it on. - - // getByAddress call converts the IP address to hostname/ipAddress format. - // if the hostname cannot determined then it will be /ipAddress. - - // TODO: Fail? if we cannot resolve the Hostname? - try { - final InetAddress byAddress = InetAddress.getByAddress( - Hex.decodeHex(value.substring(1))); - if (LOG.isDebugEnabled()) { - LOG.debug("Host Name/IP Address : {}", byAddress.toString()); - } - return true; - } catch (UnknownHostException | DecoderException e) { - return false; - } - case GeneralName.dNSName: - return DomainValidator.getInstance().isValid(value); - default: - // This should not happen, since it guarded via isSupportedGeneralName. 
- LOG.error("Unexpected type in General Name (int value) : " + type); - return false; - } - } - - @Override - public boolean validateExtendedKeyUsage(KeyPurposeId id) { - return extendKeyPurposeSet.contains(id); - } - - /** - * {@inheritDoc} - */ - @Override - public ASN1ObjectIdentifier[] getSupportedExtensions() { - return EXTENSIONS_MAP.keySet().toArray(new ASN1ObjectIdentifier[0]); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isSupportedExtension(Extension extension) { - return EXTENSIONS_MAP.containsKey(extension.getExtnId()); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean validateExtension(Extension extension) { - Preconditions.checkNotNull(extension, "Extension cannot be null"); - - if (!isSupportedExtension(extension)) { - LOG.error("Unsupported Extension found: {} ", - extension.getExtnId().getId()); - return false; - } - - BiFunction func = - EXTENSIONS_MAP.get(extension.getExtnId()); - - if (func != null) { - return func.apply(extension, this); - } - return false; - } - - /** - * {@inheritDoc} - */ - @Override - public KeyUsage getKeyUsage() { - return new KeyUsage(KeyUsage.digitalSignature | KeyUsage.keyEncipherment - | KeyUsage.dataEncipherment | KeyUsage.keyAgreement); - } - - /** - * {@inheritDoc} - */ - @Override - public RDN[] getRDNs() { - return new RDN[0]; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isValidRDN(RDN distinguishedName) { - // TODO: Right now we just approve all strings. - return true; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean validateRDN(RDN name) { - return true; - } - - @Override - public boolean isCA() { - return false; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java deleted file mode 100644 index c3ff198cd7089..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles; - -import org.bouncycastle.asn1.ASN1ObjectIdentifier; -import org.bouncycastle.asn1.x500.RDN; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.KeyPurposeId; -import org.bouncycastle.asn1.x509.KeyUsage; - -import java.net.UnknownHostException; - -/** - * Base class for profile rules. Generally profiles are documents that define - * the PKI policy. In HDDS/Ozone world, we have chosen to make PKIs - * executable code. 
So if an end-user wants to use a custom profile or one of - * the existing profile like the list below, they are free to implement a - * custom profile. - * - * PKIX - Internet PKI profile. - * FPKI - (US) Federal PKI profile. - * MISSI - US DoD profile. - * ISO 15782 - Banking - Certificate Management Part 1: Public Key - * Certificates. - * TeleTrust/MailTrusT - German MailTrusT profile for TeleTrusT (it - * really is - * capitalised that way). - * German SigG Profile - Profile to implement the German digital - * signature law - * ISIS Profile - Another German profile. - * Australian Profile - Profile for the Australian PKAF - * SS 61 43 31 Electronic ID Certificate - Swedish profile. - * FINEID S3 - Finnish profile. - * ANX Profile - Automotive Network Exchange profile. - * Microsoft Profile - This isn't a real profile, but windows uses this. - */ -public interface PKIProfile { - - /** - * Returns the list of General Names supported by this profile. - * @return - an Array of supported General Names by this certificate profile. - */ - int[] getGeneralNames(); - - /** - * Checks if a given General Name is permitted in this profile. - * @param generalName - General name. - * @return true if it is allowed, false otherwise. - */ - boolean isSupportedGeneralName(int generalName); - - /** - * Allows the profile to dictate what value ranges are valid. - * @param type - Type of the General Name. - * @param value - Value of the General Name. - * @return - true if the value is permitted, false otherwise. - * @throws UnknownHostException - on Error in IP validation. - */ - boolean validateGeneralName(int type, String value) - throws UnknownHostException; - - /** - * Returns an array of Object identifiers for extensions supported by this - * profile. - * @return an Array of ASN1ObjectIdentifier for the supported extensions. - */ - ASN1ObjectIdentifier[] getSupportedExtensions(); - - /** - * Checks if the this extension is permitted in this profile. - * @param extension - Extension to check for. - * @return - true if this extension is supported, false otherwise. - */ - boolean isSupportedExtension(Extension extension); - - /** - * Checks if the extension has the value which this profile approves. - * @param extension - Extension to validate. - * @return - True if the extension is acceptable, false otherwise. - */ - boolean validateExtension(Extension extension); - - /** - * Validate the Extended Key Usage. - * @param id - KeyPurpose ID - * @return true, if this is a supported Purpose, false otherwise. - */ - boolean validateExtendedKeyUsage(KeyPurposeId id); - - /** - * Returns the permitted Key usage mask while using this profile. - * @return KeyUsage - */ - KeyUsage getKeyUsage(); - - /** - * Gets the supported list of RDNs supported by this profile. - * @return Array of RDNs. - */ - RDN[] getRDNs(); - - /** - * Returns true if this Relative Distinguished Name component is allowed in - * this profile. - * @param distinguishedName - RDN to check. - * @return boolean, True if this RDN is allowed, false otherwise. - */ - boolean isValidRDN(RDN distinguishedName); - - /** - * Allows the profile to control the value set of the RDN. Profile can - * reject a RDN name if needed. - * @param name - RDN. - * @return true if the name is acceptable to this profile, false otherwise. - */ - boolean validateRDN(RDN name); - - /** - * True if the profile we are checking is for issuing a CA certificate. - * @return True, if the profile used is for CA, false otherwise. 
- */ - boolean isCA(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/package-info.java deleted file mode 100644 index 36c885d3108b3..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/package-info.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * PKI PKIProfile package supports different kind of profiles that certificates - * can support. If you are not familiar with PKI profiles, there is an - * excellent introduction at - * - * https://www.cs.auckland.ac.nz/~pgut001/pubs/x509guide.txt - * - * At high level, the profiles in this directory define what kinds of - * Extensions, General names , Key usage and critical extensions are - * permitted when the CA is functional. - * - * An excellent example of a profile would be ozone profile if you would - * like to see a reference to create your own profiles. - */ -package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java deleted file mode 100644 index af53904eeb6cd..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Classes related to Certificate Life Cycle or Certificate Authority Server. 
- */ -package org.apache.hadoop.hdds.security.x509.certificate.authority; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java deleted file mode 100644 index 34b4930fa7d37..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; - -import java.io.InputStream; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.cert.CertStore; -import java.security.cert.X509Certificate; -import java.util.List; - -/** - * Certificate client provides and interface to certificate operations that - * needs to be performed by all clients in the Ozone eco-system. - */ -public interface CertificateClient { - - /** - * Returns the private key of the specified component if it exists on the - * local system. - * - * @return private key or Null if there is no data. - */ - PrivateKey getPrivateKey(); - - /** - * Returns the public key of the specified component if it exists on the local - * system. - * - * @return public key or Null if there is no data. - */ - PublicKey getPublicKey(); - - /** - * Returns the certificate of the specified component if it exists on the - * local system. - * @param certSerialId - * - * @return certificate or Null if there is no data. - */ - X509Certificate getCertificate(String certSerialId) - throws CertificateException; - - /** - * Returns the certificate of the specified component if it exists on the - * local system. - * - * @return certificate or Null if there is no data. - */ - X509Certificate getCertificate(); - - /** - * Return the latest CA certificate known to the client. - * @return latest ca certificate known to the client. - */ - X509Certificate getCACertificate(); - - /** - * Verifies if this certificate is part of a trusted chain. - * @param certificate - certificate. - * @return true if it trusted, false otherwise. - */ - boolean verifyCertificate(X509Certificate certificate); - - /** - * Creates digital signature over the data stream using the components private - * key. - * - * @param stream - Data stream to sign. - * @return byte array - containing the signature. - * @throws CertificateException - on Error. 
- */ - byte[] signDataStream(InputStream stream) - throws CertificateException; - - byte[] signData(byte[] data) throws CertificateException; - - /** - * Verifies a digital Signature, given the signature and the certificate of - * the signer. - * - * @param stream - Data Stream. - * @param signature - Byte Array containing the signature. - * @param cert - Certificate of the Signer. - * @return true if verified, false if not. - */ - boolean verifySignature(InputStream stream, byte[] signature, - X509Certificate cert) throws CertificateException; - - /** - * Verifies a digital Signature, given the signature and the certificate of - * the signer. - * @param data - Data in byte array. - * @param signature - Byte Array containing the signature. - * @param cert - Certificate of the Signer. - * @return true if verified, false if not. - */ - boolean verifySignature(byte[] data, byte[] signature, - X509Certificate cert) throws CertificateException; - - /** - * Returns a CSR builder that can be used to creates a Certificate sigining - * request. - * - * @return CertificateSignRequest.Builder - */ - CertificateSignRequest.Builder getCSRBuilder() throws CertificateException; - - /** - * Get the certificate of well-known entity from SCM. - * - * @param query - String Query, please see the implementation for the - * discussion on the query formats. - * @return X509Certificate or null if not found. - */ - X509Certificate queryCertificate(String query); - - /** - * Stores the Certificate for this client. Don't use this api to add - * trusted certificates of others. - * - * @param pemEncodedCert - pem encoded X509 Certificate - * @param force - override any existing file - * @throws CertificateException - on Error. - * - */ - void storeCertificate(String pemEncodedCert, boolean force) - throws CertificateException; - - /** - * Stores the Certificate for this client. Don't use this api to add - * trusted certificates of others. - * - * @param pemEncodedCert - pem encoded X509 Certificate - * @param force - override any existing file - * @param caCert - Is CA certificate. - * @throws CertificateException - on Error. - * - */ - void storeCertificate(String pemEncodedCert, boolean force, boolean caCert) - throws CertificateException; - - /** - * Stores the trusted chain of certificates. - * - * @param certStore - Cert Store. - * @throws CertificateException - on Error. - */ - void storeTrustChain(CertStore certStore) throws CertificateException; - - /** - * Stores the trusted chain of certificates. - * - * @param certificates - List of Certificates. - - * @throws CertificateException - on Error. - */ - void storeTrustChain(List certificates) - throws CertificateException; - - /** - * Initialize certificate client. - * - * */ - InitResponse init() throws CertificateException; - - /** - * Represents initialization response of client. - * 1. SUCCESS: Means client is initialized successfully and all required - * files are in expected state. - * 2. FAILURE: Initialization failed due to some unrecoverable error. - * 3. GETCERT: Bootstrap of keypair is successful but certificate is not - * found. Client should request SCM signed certificate. 
- * - */ - enum InitResponse { - SUCCESS, - FAILURE, - GETCERT, - RECOVER - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java deleted file mode 100644 index 76986586d344c..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hdds.security.x509.SecurityConfig; - -/** - * Certificate client for DataNodes. - */ -public class DNCertificateClient extends DefaultCertificateClient { - - private static final Logger LOG = - LoggerFactory.getLogger(DNCertificateClient.class); - - public static final String COMPONENT_NAME = "dn"; - - public DNCertificateClient(SecurityConfig securityConfig, - String certSerialId) { - super(securityConfig, LOG, certSerialId, COMPONENT_NAME); - } - - public DNCertificateClient(SecurityConfig securityConfig) { - super(securityConfig, LOG, null, COMPONENT_NAME); - } - - /** - * Returns a CSR builder that can be used to creates a Certificate signing - * request. - * - * @return CertificateSignRequest.Builder - */ - @Override - public CertificateSignRequest.Builder getCSRBuilder() - throws CertificateException { - return super.getCSRBuilder() - .setDigitalEncryption(false) - .setDigitalSignature(false); - } - - public Logger getLogger() { - return LOG; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java deleted file mode 100644 index ff99e080c49eb..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ /dev/null @@ -1,828 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import com.google.common.base.Preconditions; -import org.apache.commons.io.FilenameUtils; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.math.NumberUtils; -import org.apache.commons.validator.routines.DomainValidator; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.bouncycastle.cert.X509CertificateHolder; -import org.slf4j.Logger; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.Signature; -import java.security.SignatureException; -import java.security.cert.CertStore; -import java.security.cert.X509Certificate; -import java.security.spec.InvalidKeySpecException; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ConcurrentHashMap; - -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS; -import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateException.ErrorCode.*; - -/** - * Default Certificate client implementation. It provides certificate - * operations that needs to be performed by certificate clients in the Ozone - * eco-system. 
- */ -public abstract class DefaultCertificateClient implements CertificateClient { - - private static final String CERT_FILE_NAME_FORMAT = "%s.crt"; - private static final String CA_CERT_PREFIX = "CA-"; - private static final int CA_CERT_PREFIX_LEN = 3; - private final Logger logger; - private final SecurityConfig securityConfig; - private final KeyCodec keyCodec; - private PrivateKey privateKey; - private PublicKey publicKey; - private X509Certificate x509Certificate; - private Map certificateMap; - private String certSerialId; - private String caCertId; - private String component; - - DefaultCertificateClient(SecurityConfig securityConfig, Logger log, - String certSerialId, String component) { - Objects.requireNonNull(securityConfig); - this.securityConfig = securityConfig; - keyCodec = new KeyCodec(securityConfig, component); - this.logger = log; - this.certificateMap = new ConcurrentHashMap<>(); - this.certSerialId = certSerialId; - this.component = component; - - loadAllCertificates(); - } - - /** - * Load all certificates from configured location. - * */ - private void loadAllCertificates() { - // See if certs directory exists in file system. - Path certPath = securityConfig.getCertificateLocation(component); - if (Files.exists(certPath) && Files.isDirectory(certPath)) { - getLogger().info("Loading certificate from location:{}.", - certPath); - File[] certFiles = certPath.toFile().listFiles(); - - if (certFiles != null) { - CertificateCodec certificateCodec = - new CertificateCodec(securityConfig, component); - long latestCaCertSerailId = -1L; - for (File file : certFiles) { - if (file.isFile()) { - try { - X509CertificateHolder x509CertificateHolder = certificateCodec - .readCertificate(certPath, file.getName()); - X509Certificate cert = - CertificateCodec.getX509Certificate(x509CertificateHolder); - if (cert != null && cert.getSerialNumber() != null) { - if (cert.getSerialNumber().toString().equals(certSerialId)) { - x509Certificate = cert; - } - certificateMap.putIfAbsent(cert.getSerialNumber().toString(), - cert); - if (file.getName().startsWith(CA_CERT_PREFIX)) { - String certFileName = FilenameUtils.getBaseName( - file.getName()); - long tmpCaCertSerailId = NumberUtils.toLong( - certFileName.substring(CA_CERT_PREFIX_LEN)); - if (tmpCaCertSerailId > latestCaCertSerailId) { - latestCaCertSerailId = tmpCaCertSerailId; - } - } - getLogger().info("Added certificate from file:{}.", - file.getAbsolutePath()); - } else { - getLogger().error("Error reading certificate from file:{}", - file); - } - } catch (java.security.cert.CertificateException | IOException e) { - getLogger().error("Error reading certificate from file:{}.", - file.getAbsolutePath(), e); - } - } - } - if (latestCaCertSerailId != -1) { - caCertId = Long.toString(latestCaCertSerailId); - } - } - } - } - - /** - * Returns the private key of the specified if it exists on the local - * system. - * - * @return private key or Null if there is no data. - */ - @Override - public PrivateKey getPrivateKey() { - if (privateKey != null) { - return privateKey; - } - - Path keyPath = securityConfig.getKeyLocation(component); - if (OzoneSecurityUtil.checkIfFileExist(keyPath, - securityConfig.getPrivateKeyFileName())) { - try { - privateKey = keyCodec.readPrivateKey(); - } catch (InvalidKeySpecException | NoSuchAlgorithmException - | IOException e) { - getLogger().error("Error while getting private key.", e); - } - } - return privateKey; - } - - /** - * Returns the public key of the specified if it exists on the local system. 
- * - * @return public key or Null if there is no data. - */ - @Override - public PublicKey getPublicKey() { - if (publicKey != null) { - return publicKey; - } - - Path keyPath = securityConfig.getKeyLocation(component); - if (OzoneSecurityUtil.checkIfFileExist(keyPath, - securityConfig.getPublicKeyFileName())) { - try { - publicKey = keyCodec.readPublicKey(); - } catch (InvalidKeySpecException | NoSuchAlgorithmException - | IOException e) { - getLogger().error("Error while getting public key.", e); - } - } - return publicKey; - } - - /** - * Returns the default certificate of given client if it exists. - * - * @return certificate or Null if there is no data. - */ - @Override - public X509Certificate getCertificate() { - if (x509Certificate != null) { - return x509Certificate; - } - - if (certSerialId == null) { - getLogger().error("Default certificate serial id is not set. Can't " + - "locate the default certificate for this client."); - return null; - } - // Refresh the cache from file system. - loadAllCertificates(); - if (certificateMap.containsKey(certSerialId)) { - x509Certificate = certificateMap.get(certSerialId); - } - return x509Certificate; - } - - /** - * Return the latest CA certificate known to the client. - * @return latest ca certificate known to the client. - */ - @Override - public X509Certificate getCACertificate() { - if (caCertId != null) { - return certificateMap.get(caCertId); - } - return null; - } - - /** - * Returns the certificate with the specified certificate serial id if it - * exists else try to get it from SCM. - * @param certId - * - * @return certificate or Null if there is no data. - */ - @Override - public X509Certificate getCertificate(String certId) - throws CertificateException { - // Check if it is in cache. - if (certificateMap.containsKey(certId)) { - return certificateMap.get(certId); - } - // Try to get it from SCM. - return this.getCertificateFromScm(certId); - } - - /** - * Get certificate from SCM and store it in local file system. - * @param certId - * @return certificate - */ - private X509Certificate getCertificateFromScm(String certId) - throws CertificateException { - - getLogger().info("Getting certificate with certSerialId:{}.", - certId); - try { - SCMSecurityProtocol scmSecurityProtocolClient = getScmSecurityClient( - (OzoneConfiguration) securityConfig.getConfiguration()); - String pemEncodedCert = - scmSecurityProtocolClient.getCertificate(certId); - this.storeCertificate(pemEncodedCert, true); - return CertificateCodec.getX509Certificate(pemEncodedCert); - } catch (Exception e) { - getLogger().error("Error while getting Certificate with " + - "certSerialId:{} from scm.", certId, e); - throw new CertificateException("Error while getting certificate for " + - "certSerialId:" + certId, e, CERTIFICATE_ERROR); - } - } - - /** - * Verifies if this certificate is part of a trusted chain. - * - * @param certificate - certificate. - * @return true if it trusted, false otherwise. - */ - @Override - public boolean verifyCertificate(X509Certificate certificate) { - throw new UnsupportedOperationException("Operation not supported."); - } - - /** - * Creates digital signature over the data stream using the s private key. - * - * @param stream - Data stream to sign. - * @throws CertificateException - on Error. 
- */ - @Override - public byte[] signDataStream(InputStream stream) - throws CertificateException { - try { - Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - sign.initSign(getPrivateKey()); - byte[] buffer = new byte[1024 * 4]; - - int len; - while (-1 != (len = stream.read(buffer))) { - sign.update(buffer, 0, len); - } - return sign.sign(); - } catch (NoSuchAlgorithmException | NoSuchProviderException - | InvalidKeyException | SignatureException | IOException e) { - getLogger().error("Error while signing the stream", e); - throw new CertificateException("Error while signing the stream", e, - CRYPTO_SIGN_ERROR); - } - } - - /** - * Creates digital signature over the data stream using the s private key. - * - * @param data - Data to sign. - * @throws CertificateException - on Error. - */ - @Override - public byte[] signData(byte[] data) throws CertificateException { - try { - Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - - sign.initSign(getPrivateKey()); - sign.update(data); - - return sign.sign(); - } catch (NoSuchAlgorithmException | NoSuchProviderException - | InvalidKeyException | SignatureException e) { - getLogger().error("Error while signing the stream", e); - throw new CertificateException("Error while signing the stream", e, - CRYPTO_SIGN_ERROR); - } - } - - /** - * Verifies a digital Signature, given the signature and the certificate of - * the signer. - * - * @param stream - Data Stream. - * @param signature - Byte Array containing the signature. - * @param cert - Certificate of the Signer. - * @return true if verified, false if not. - */ - @Override - public boolean verifySignature(InputStream stream, byte[] signature, - X509Certificate cert) throws CertificateException { - try { - Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - sign.initVerify(cert); - byte[] buffer = new byte[1024 * 4]; - - int len; - while (-1 != (len = stream.read(buffer))) { - sign.update(buffer, 0, len); - } - return sign.verify(signature); - } catch (NoSuchAlgorithmException | NoSuchProviderException - | InvalidKeyException | SignatureException | IOException e) { - getLogger().error("Error while signing the stream", e); - throw new CertificateException("Error while signing the stream", e, - CRYPTO_SIGNATURE_VERIFICATION_ERROR); - } - } - - /** - * Verifies a digital Signature, given the signature and the certificate of - * the signer. - * - * @param data - Data in byte array. - * @param signature - Byte Array containing the signature. - * @param cert - Certificate of the Signer. - * @return true if verified, false if not. - */ - @Override - public boolean verifySignature(byte[] data, byte[] signature, - X509Certificate cert) throws CertificateException { - try { - Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - sign.initVerify(cert); - sign.update(data); - return sign.verify(signature); - } catch (NoSuchAlgorithmException | NoSuchProviderException - | InvalidKeyException | SignatureException e) { - getLogger().error("Error while signing the stream", e); - throw new CertificateException("Error while signing the stream", e, - CRYPTO_SIGNATURE_VERIFICATION_ERROR); - } - } - - /** - * Verifies a digital Signature, given the signature and the certificate of - * the signer. - * - * @param data - Data in byte array. 
- * @param signature - Byte Array containing the signature. - * @param pubKey - Certificate of the Signer. - * @return true if verified, false if not. - */ - private boolean verifySignature(byte[] data, byte[] signature, - PublicKey pubKey) throws CertificateException { - try { - Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - sign.initVerify(pubKey); - sign.update(data); - return sign.verify(signature); - } catch (NoSuchAlgorithmException | NoSuchProviderException - | InvalidKeyException | SignatureException e) { - getLogger().error("Error while signing the stream", e); - throw new CertificateException("Error while signing the stream", e, - CRYPTO_SIGNATURE_VERIFICATION_ERROR); - } - } - - /** - * Returns a CSR builder that can be used to creates a Certificate signing - * request. - * - * @return CertificateSignRequest.Builder - */ - @Override - public CertificateSignRequest.Builder getCSRBuilder() - throws CertificateException { - CertificateSignRequest.Builder builder = - new CertificateSignRequest.Builder() - .setConfiguration(securityConfig.getConfiguration()); - try { - DomainValidator validator = DomainValidator.getInstance(); - // Add all valid ips. - OzoneSecurityUtil.getValidInetsForCurrentHost().forEach( - ip -> { - builder.addIpAddress(ip.getHostAddress()); - if(validator.isValid(ip.getCanonicalHostName())) { - builder.addDnsName(ip.getCanonicalHostName()); - } - }); - } catch (IOException e) { - throw new CertificateException("Error while adding ip to CSR builder", - e, CSR_ERROR); - } - return builder; - } - - /** - * Get the certificate of well-known entity from SCM. - * - * @param query - String Query, please see the implementation for the - * discussion on the query formats. - * @return X509Certificate or null if not found. - */ - @Override - public X509Certificate queryCertificate(String query) { - // TODO: - throw new UnsupportedOperationException("Operation not supported"); - } - - /** - * Stores the Certificate for this client. Don't use this api to add trusted - * certificates of others. - * - * @param pemEncodedCert - pem encoded X509 Certificate - * @param force - override any existing file - * @throws CertificateException - on Error. - * - */ - @Override - public void storeCertificate(String pemEncodedCert, boolean force) - throws CertificateException { - this.storeCertificate(pemEncodedCert, force, false); - } - - /** - * Stores the Certificate for this client. Don't use this api to add trusted - * certificates of others. - * - * @param pemEncodedCert - pem encoded X509 Certificate - * @param force - override any existing file - * @param caCert - Is CA certificate. - * @throws CertificateException - on Error. 
- * - */ - @Override - public void storeCertificate(String pemEncodedCert, boolean force, - boolean caCert) throws CertificateException { - CertificateCodec certificateCodec = new CertificateCodec(securityConfig, - component); - try { - Path basePath = securityConfig.getCertificateLocation(component); - - X509Certificate cert = - CertificateCodec.getX509Certificate(pemEncodedCert); - String certName = String.format(CERT_FILE_NAME_FORMAT, - cert.getSerialNumber().toString()); - - if(caCert) { - certName = CA_CERT_PREFIX + certName; - caCertId = cert.getSerialNumber().toString(); - } - - certificateCodec.writeCertificate(basePath, certName, - pemEncodedCert, force); - certificateMap.putIfAbsent(cert.getSerialNumber().toString(), cert); - } catch (IOException | java.security.cert.CertificateException e) { - throw new CertificateException("Error while storing certificate.", e, - CERTIFICATE_ERROR); - } - } - - /** - * Stores the trusted chain of certificates for a specific . - * - * @param ks - Key Store. - * @throws CertificateException - on Error. - */ - @Override - public synchronized void storeTrustChain(CertStore ks) - throws CertificateException { - throw new UnsupportedOperationException("Operation not supported."); - } - - - /** - * Stores the trusted chain of certificates for a specific . - * - * @param certificates - List of Certificates. - * @throws CertificateException - on Error. - */ - @Override - public synchronized void storeTrustChain(List certificates) - throws CertificateException { - throw new UnsupportedOperationException("Operation not supported."); - } - - /** - * Defines 8 cases of initialization. - * Each case specifies objects found. - * 0. NONE Keypair as well as certificate not found. - * 1. CERT Certificate found but keypair missing. - * 2. PUBLIC_KEY Public key found but private key and - * certificate is missing. - * 3. PUBLICKEY_CERT Only public key and certificate is present. - * 4. PRIVATE_KEY Only private key is present. - * 5. PRIVATEKEY_CERT Only private key and certificate is present. - * 6. PUBLICKEY_PRIVATEKEY indicates private and public key were read - * successfully from configured location but - * Certificate. - * 7. All Keypair as well as certificate is present. - * - * */ - protected enum InitCase { - NONE, - CERT, - PUBLIC_KEY, - PUBLICKEY_CERT, - PRIVATE_KEY, - PRIVATEKEY_CERT, - PUBLICKEY_PRIVATEKEY, - ALL - } - - /** - * - * Initializes client by performing following actions. - * 1. Create key dir if not created already. - * 2. Generates and stores a keypair. - * 3. Try to recover public key if private key and certificate is present - * but public key is missing. - * - * Truth table: - * +--------------+-----------------+--------------+----------------+ - * | Private Key | Public Keys | Certificate | Result | - * +--------------+-----------------+--------------+----------------+ - * | False (0) | False (0) | False (0) | GETCERT 000 | - * | False (0) | False (0) | True (1) | FAILURE 001 | - * | False (0) | True (1) | False (0) | FAILURE 010 | - * | False (0) | True (1) | True (1) | FAILURE 011 | - * | True (1) | False (0) | False (0) | FAILURE 100 | - * | True (1) | False (0) | True (1) | SUCCESS 101 | - * | True (1) | True (1) | False (0) | GETCERT 110 | - * | True (1) | True (1) | True (1) | SUCCESS 111 | - * +--------------+-----------------+--------------+----------------+ - * - * @return InitResponse - * Returns FAILURE in following cases: - * 1. If private key is missing but public key or certificate is available. - * 2. 
If public key and certificate is missing. - * - * Returns SUCCESS in following cases: - * 1. If keypair as well certificate is available. - * 2. If private key and certificate is available and public key is - * recovered successfully. - * - * Returns GETCERT in following cases: - * 1. First time when keypair and certificate is not available, keypair - * will be generated and stored at configured location. - * 2. When keypair (public/private key) is available but certificate is - * missing. - * - */ - @Override - public synchronized InitResponse init() throws CertificateException { - int initCase = 0; - PrivateKey pvtKey= getPrivateKey(); - PublicKey pubKey = getPublicKey(); - X509Certificate certificate = getCertificate(); - - if(pvtKey != null){ - initCase = initCase | 1<<2; - } - if(pubKey != null){ - initCase = initCase | 1<<1; - } - if(certificate != null){ - initCase = initCase | 1; - } - getLogger().info("Certificate client init case: {}", initCase); - Preconditions.checkArgument(initCase < 8, "Not a " + - "valid case."); - InitCase init = InitCase.values()[initCase]; - return handleCase(init); - } - - /** - * Default handling of each {@link InitCase}. - * */ - protected InitResponse handleCase(InitCase init) - throws CertificateException { - switch (init) { - case NONE: - getLogger().info("Creating keypair for client as keypair and " + - "certificate not found."); - bootstrapClientKeys(); - return GETCERT; - case CERT: - getLogger().error("Private key not found, while certificate is still" + - " present. Delete keypair and try again."); - return FAILURE; - case PUBLIC_KEY: - getLogger().error("Found public key but private key and certificate " + - "missing."); - return FAILURE; - case PRIVATE_KEY: - getLogger().info("Found private key but public key and certificate " + - "is missing."); - // TODO: Recovering public key from private might be possible in some - // cases. - return FAILURE; - case PUBLICKEY_CERT: - getLogger().error("Found public key and certificate but private " + - "key is missing."); - return FAILURE; - case PRIVATEKEY_CERT: - getLogger().info("Found private key and certificate but public key" + - " missing."); - if (recoverPublicKey()) { - return SUCCESS; - } else { - getLogger().error("Public key recovery failed."); - return FAILURE; - } - case PUBLICKEY_PRIVATEKEY: - getLogger().info("Found private and public key but certificate is" + - " missing."); - if (validateKeyPair(getPublicKey())) { - return GETCERT; - } else { - getLogger().info("Keypair validation failed."); - return FAILURE; - } - case ALL: - getLogger().info("Found certificate file along with KeyPair."); - if (validateKeyPairAndCertificate()) { - return SUCCESS; - } else { - return FAILURE; - } - default: - getLogger().error("Unexpected case: {} (private/public/cert)", - Integer.toBinaryString(init.ordinal())); - - return FAILURE; - } - } - - /** - * Validate keypair and certificate. - * */ - protected boolean validateKeyPairAndCertificate() throws - CertificateException { - if (validateKeyPair(getPublicKey())) { - getLogger().info("Keypair validated."); - // TODO: Certificates cryptographic validity can be checked as well. 
- if (validateKeyPair(getCertificate().getPublicKey())) { - getLogger().info("Keypair validated with certificate."); - } else { - getLogger().error("Stored certificate is generated with different " + - "private key."); - return false; - } - } else { - getLogger().error("Keypair validation failed."); - return false; - } - return true; - } - - /** - * Tries to recover public key from certificate. Also validates recovered - * public key. - * */ - protected boolean recoverPublicKey() throws CertificateException { - PublicKey pubKey = getCertificate().getPublicKey(); - try { - - if(validateKeyPair(pubKey)){ - keyCodec.writePublicKey(pubKey); - publicKey = pubKey; - } else { - getLogger().error("Can't recover public key " + - "corresponding to private key.", BOOTSTRAP_ERROR); - return false; - } - } catch (IOException e) { - throw new CertificateException("Error while trying to recover " + - "public key.", e, BOOTSTRAP_ERROR); - } - return true; - } - - /** - * Validates public and private key of certificate client. - * - * @param pubKey - * */ - protected boolean validateKeyPair(PublicKey pubKey) - throws CertificateException { - byte[] challenge = RandomStringUtils.random(1000).getBytes( - StandardCharsets.UTF_8); - byte[] sign = signDataStream(new ByteArrayInputStream(challenge)); - return verifySignature(challenge, sign, pubKey); - } - - /** - * Bootstrap the client by creating keypair and storing it in configured - * location. - * */ - protected void bootstrapClientKeys() throws CertificateException { - Path keyPath = securityConfig.getKeyLocation(component); - if (Files.notExists(keyPath)) { - try { - Files.createDirectories(keyPath); - } catch (IOException e) { - throw new CertificateException("Error while creating directories " + - "for certificate storage.", BOOTSTRAP_ERROR); - } - } - KeyPair keyPair = createKeyPair(); - privateKey = keyPair.getPrivate(); - publicKey = keyPair.getPublic(); - } - - protected KeyPair createKeyPair() throws CertificateException { - HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig); - KeyPair keyPair = null; - try { - keyPair = keyGenerator.generateKey(); - keyCodec.writePublicKey(keyPair.getPublic()); - keyCodec.writePrivateKey(keyPair.getPrivate()); - } catch (NoSuchProviderException | NoSuchAlgorithmException - | IOException e) { - getLogger().error("Error while bootstrapping certificate client.", e); - throw new CertificateException("Error while bootstrapping certificate.", - BOOTSTRAP_ERROR); - } - return keyPair; - } - - public Logger getLogger() { - return logger; - } - - /** - * Create a scm security client, used to get SCM signed certificate. 
- * - * @return {@link SCMSecurityProtocol} - */ - private static SCMSecurityProtocol getScmSecurityClient( - OzoneConfiguration conf) throws IOException { - RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, - ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class); - InetSocketAddress scmSecurityProtoAdd = - HddsUtils.getScmAddressForSecurityProtocol(conf); - SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient = - new SCMSecurityProtocolClientSideTranslatorPB( - RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion, - scmSecurityProtoAdd, UserGroupInformation.getCurrentUser(), - conf, NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))); - return scmSecurityClient; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java deleted file mode 100644 index cb3ce7536e1ef..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; - -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.RECOVER; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS; - -/** - * Certificate client for OzoneManager. 
- */ -public class OMCertificateClient extends DefaultCertificateClient { - - private static final Logger LOG = - LoggerFactory.getLogger(OMCertificateClient.class); - - public static final String COMPONENT_NAME = "om"; - - public OMCertificateClient(SecurityConfig securityConfig, - String certSerialId) { - super(securityConfig, LOG, certSerialId, COMPONENT_NAME); - } - - public OMCertificateClient(SecurityConfig securityConfig) { - super(securityConfig, LOG, null, COMPONENT_NAME); - } - - protected InitResponse handleCase(InitCase init) throws - CertificateException { - switch (init) { - case NONE: - LOG.info("Creating keypair for client as keypair and certificate not " + - "found."); - bootstrapClientKeys(); - return GETCERT; - case CERT: - LOG.error("Private key not found, while certificate is still present." + - "Delete keypair and try again."); - return FAILURE; - case PUBLIC_KEY: - LOG.error("Found public key but private key and certificate missing."); - return FAILURE; - case PRIVATE_KEY: - LOG.info("Found private key but public key and certificate is missing."); - // TODO: Recovering public key from private might be possible in some - // cases. - return FAILURE; - case PUBLICKEY_CERT: - LOG.error("Found public key and certificate but private key is " + - "missing."); - return FAILURE; - case PRIVATEKEY_CERT: - LOG.info("Found private key and certificate but public key missing."); - if (recoverPublicKey()) { - return SUCCESS; - } else { - LOG.error("Public key recovery failed."); - return FAILURE; - } - case PUBLICKEY_PRIVATEKEY: - LOG.info("Found private and public key but certificate is missing."); - if (validateKeyPair(getPublicKey())) { - return RECOVER; - } else { - LOG.error("Keypair validation failed."); - return FAILURE; - } - case ALL: - LOG.info("Found certificate file along with KeyPair."); - if (validateKeyPairAndCertificate()) { - return SUCCESS; - } else { - return FAILURE; - } - default: - LOG.error("Unexpected case: {} (private/public/cert)", - Integer.toBinaryString(init.ordinal())); - return FAILURE; - } - } - - /** - * Returns a CSR builder that can be used to creates a Certificate signing - * request. - * - * @return CertificateSignRequest.Builder - */ - @Override - public CertificateSignRequest.Builder getCSRBuilder() - throws CertificateException { - return super.getCSRBuilder() - .setDigitalEncryption(true) - .setDigitalSignature(true); - } - - - public Logger getLogger() { - return LOG; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java deleted file mode 100644 index dea609bd249be..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Classes related to creating and using certificates. - */ -package org.apache.hadoop.hdds.security.x509.certificate.client; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java deleted file mode 100644 index 2c8721b199bd7..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.utils; - -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; -import org.bouncycastle.openssl.jcajce.JcaPEMWriter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.StringWriter; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.PosixFilePermission; -import java.security.cert.CertificateEncodingException; -import java.security.cert.CertificateException; -import java.security.cert.CertificateFactory; -import java.security.cert.X509Certificate; -import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE; -import static java.nio.file.attribute.PosixFilePermission.OWNER_READ; -import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE; - -/** - * A class used to read and write X.509 certificates PEM encoded Streams. 
- */ -public class CertificateCodec { - public static final String BEGIN_CERT = "-----BEGIN CERTIFICATE-----"; - public static final String END_CERT = "-----END CERTIFICATE-----"; - - private static final Logger LOG = - LoggerFactory.getLogger(CertificateCodec.class); - private static final JcaX509CertificateConverter CERTIFICATE_CONVERTER - = new JcaX509CertificateConverter(); - private final SecurityConfig securityConfig; - private final Path location; - private Set permissionSet = - Stream.of(OWNER_READ, OWNER_WRITE, OWNER_EXECUTE) - .collect(Collectors.toSet()); - /** - * Creates a CertificateCodec with component name. - * - * @param config - Security Config. - * @param component - Component String. - */ - public CertificateCodec(SecurityConfig config, String component) { - this.securityConfig = config; - this.location = securityConfig.getCertificateLocation(component); - } - - /** - * Returns a X509 Certificate from the Certificate Holder. - * - * @param holder - Holder - * @return X509Certificate. - * @throws CertificateException - on Error. - */ - public static X509Certificate getX509Certificate(X509CertificateHolder holder) - throws CertificateException { - return CERTIFICATE_CONVERTER.getCertificate(holder); - } - - /** - * Returns the Certificate as a PEM encoded String. - * - * @param x509CertHolder - X.509 Certificate Holder. - * @return PEM Encoded Certificate String. - * @throws SCMSecurityException - On failure to create a PEM String. - */ - public static String getPEMEncodedString(X509CertificateHolder x509CertHolder) - throws SCMSecurityException { - try { - return getPEMEncodedString(getX509Certificate(x509CertHolder)); - } catch (CertificateException exp) { - throw new SCMSecurityException(exp); - } - } - - /** - * Returns the Certificate as a PEM encoded String. - * - * @param certificate - X.509 Certificate. - * @return PEM Encoded Certificate String. - * @throws SCMSecurityException - On failure to create a PEM String. - */ - public static String getPEMEncodedString(X509Certificate certificate) - throws SCMSecurityException { - try { - StringWriter stringWriter = new StringWriter(); - try (JcaPEMWriter pemWriter = new JcaPEMWriter(stringWriter)) { - pemWriter.writeObject(certificate); - } - return stringWriter.toString(); - } catch (IOException e) { - LOG.error("Error in encoding certificate." + certificate - .getSubjectDN().toString(), e); - throw new SCMSecurityException("PEM Encoding failed for certificate." + - certificate.getSubjectDN().toString(), e); - } - } - - /** - * Gets the X.509 Certificate from PEM encoded String. - * - * @param pemEncodedString - PEM encoded String. - * @return X509Certificate - Certificate. - * @throws CertificateException - Thrown on Failure. - * @throws IOException - Thrown on Failure. - */ - public static X509Certificate getX509Certificate(String pemEncodedString) - throws CertificateException, IOException { - CertificateFactory fact = CertificateFactory.getInstance("X.509"); - try (InputStream input = IOUtils.toInputStream(pemEncodedString, UTF_8)) { - return (X509Certificate) fact.generateCertificate(input); - } - } - - /** - * Get Certificate location. - * - * @return Path - */ - public Path getLocation() { - return location; - } - - /** - * Gets the X.509 Certificate from PEM encoded String. - * - * @param pemEncodedString - PEM encoded String. - * @return X509Certificate - Certificate. - * @throws CertificateException - Thrown on Failure. - * @throws IOException - Thrown on Failure. 
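The PEM round trip implemented by the static helpers above fits in a few lines outside CertificateCodec as well. Here is a minimal self-contained sketch using the same BouncyCastle JcaPEMWriter and JDK CertificateFactory calls as the deleted code; the class and method names are hypothetical.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.StringWriter;
import java.nio.charset.StandardCharsets;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import org.bouncycastle.openssl.jcajce.JcaPEMWriter;

/** Illustrative stand-ins for getPEMEncodedString / getX509Certificate. */
public final class PemCertSketch {
  private PemCertSketch() { }

  /** Serialize a certificate to a PEM string. */
  public static String toPem(X509Certificate cert) throws IOException {
    StringWriter out = new StringWriter();
    try (JcaPEMWriter pemWriter = new JcaPEMWriter(out)) {
      pemWriter.writeObject(cert);
    }
    return out.toString();
  }

  /** Parse a PEM string back into an X509Certificate. */
  public static X509Certificate fromPem(String pem) throws CertificateException {
    CertificateFactory factory = CertificateFactory.getInstance("X.509");
    return (X509Certificate) factory.generateCertificate(
        new ByteArrayInputStream(pem.getBytes(StandardCharsets.UTF_8)));
  }
}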
- */ - public static X509Certificate getX509Cert(String pemEncodedString) - throws CertificateException, IOException { - CertificateFactory fact = CertificateFactory.getInstance("X.509"); - try (InputStream input = IOUtils.toInputStream(pemEncodedString, UTF_8)) { - return (X509Certificate) fact.generateCertificate(input); - } - } - - /** - * Write the Certificate pointed to the location by the configs. - * - * @param xCertificate - Certificate to write. - * @throws SCMSecurityException - on Error. - * @throws IOException - on Error. - */ - public void writeCertificate(X509CertificateHolder xCertificate) - throws SCMSecurityException, IOException { - String pem = getPEMEncodedString(xCertificate); - writeCertificate(location.toAbsolutePath(), - this.securityConfig.getCertificateFileName(), pem, false); - } - - /** - * Write the Certificate to the specific file. - * - * @param xCertificate - Certificate to write. - * @param fileName - file name to write to. - * @param overwrite - boolean value, true means overwrite an existing - * certificate. - * @throws SCMSecurityException - On Error. - * @throws IOException - On Error. - */ - public void writeCertificate(X509CertificateHolder xCertificate, - String fileName, boolean overwrite) - throws SCMSecurityException, IOException { - String pem = getPEMEncodedString(xCertificate); - writeCertificate(location.toAbsolutePath(), fileName, pem, overwrite); - } - - /** - * Helper function that writes data to the file. - * - * @param basePath - Base Path where the file needs to written to. - * @param fileName - Certificate file name. - * @param pemEncodedCertificate - pemEncoded Certificate file. - * @param force - Overwrite if the file exists. - * @throws IOException - on Error. - */ - public synchronized void writeCertificate(Path basePath, String fileName, - String pemEncodedCertificate, boolean force) - throws IOException { - File certificateFile = - Paths.get(basePath.toString(), fileName).toFile(); - if (certificateFile.exists() && !force) { - throw new SCMSecurityException("Specified certificate file already " + - "exists.Please use force option if you want to overwrite it."); - } - if (!basePath.toFile().exists()) { - if (!basePath.toFile().mkdirs()) { - LOG.error("Unable to create file path. Path: {}", basePath); - throw new IOException("Creation of the directories failed." - + basePath.toString()); - } - } - try (FileOutputStream file = new FileOutputStream(certificateFile)) { - IOUtils.write(pemEncodedCertificate, file, UTF_8); - } - - Files.setPosixFilePermissions(certificateFile.toPath(), permissionSet); - } - - /** - * Rertuns a default certificate using the default paths for this component. - * - * @return X509CertificateHolder. - * @throws SCMSecurityException - on Error. - * @throws CertificateException - on Error. - * @throws IOException - on Error. - */ - public X509CertificateHolder readCertificate() throws - CertificateException, IOException { - return readCertificate(this.location.toAbsolutePath(), - this.securityConfig.getCertificateFileName()); - } - - /** - * Returns the certificate from the specific PEM encoded file. - * - * @param basePath - base path - * @param fileName - fileName - * @return X%09 Certificate - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - * @throws CertificateException - on Error. 
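writeCertificate above persists the PEM text and then restricts the file to its owner. The same pattern with plain java.nio, shown as a sketch; the path and content are placeholders, and like the deleted code it assumes a POSIX file system.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFilePermission;
import java.util.EnumSet;
import java.util.Set;

public final class OwnerOnlyWriteSketch {
  public static void main(String[] args) throws IOException {
    Path target = Paths.get("cert.pem");               // hypothetical path
    String pem = "-----BEGIN CERTIFICATE-----\n...";   // placeholder content
    Files.write(target, pem.getBytes(StandardCharsets.UTF_8));

    // Restrict the file to its owner, mirroring the permission set used above.
    // Throws UnsupportedOperationException on non-POSIX file systems.
    Set<PosixFilePermission> ownerOnly = EnumSet.of(
        PosixFilePermission.OWNER_READ,
        PosixFilePermission.OWNER_WRITE,
        PosixFilePermission.OWNER_EXECUTE);
    Files.setPosixFilePermissions(target, ownerOnly);
  }
}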
- */ - public synchronized X509CertificateHolder readCertificate(Path basePath, - String fileName) throws IOException, CertificateException { - File certificateFile = Paths.get(basePath.toString(), fileName).toFile(); - return getX509CertificateHolder(certificateFile); - } - - /** - * Helper function to read certificate. - * - * @param certificateFile - Full path to certificate file. - * @return X509CertificateHolder - * @throws IOException - On Error. - * @throws CertificateException - On Error. - */ - private X509CertificateHolder getX509CertificateHolder(File certificateFile) - throws IOException, CertificateException { - if (!certificateFile.exists()) { - throw new IOException("Unable to find the requested certificate. Path: " - + certificateFile.toString()); - } - CertificateFactory fact = CertificateFactory.getInstance("X.509"); - try (FileInputStream is = new FileInputStream(certificateFile)) { - return getCertificateHolder( - (X509Certificate) fact.generateCertificate(is)); - } - } - - /** - * Returns the Certificate holder from X509Ceritificate class. - * - * @param x509cert - Certificate class. - * @return X509CertificateHolder - * @throws CertificateEncodingException - on Error. - * @throws IOException - on Error. - */ - public X509CertificateHolder getCertificateHolder(X509Certificate x509cert) - throws CertificateEncodingException, IOException { - return new X509CertificateHolder(x509cert.getEncoded()); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java deleted file mode 100644 index 4971d4ae14f0e..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Certificate Utils. - */ -package org.apache.hadoop.hdds.security.x509.certificate.utils; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java deleted file mode 100644 index 28f853a7f63da..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java +++ /dev/null @@ -1,289 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.certificates.utils; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; -import org.apache.logging.log4j.util.Strings; -import org.bouncycastle.asn1.DEROctetString; -import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x509.BasicConstraints; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.Extensions; -import org.bouncycastle.asn1.x509.GeneralName; -import org.bouncycastle.asn1.x509.GeneralNames; -import org.bouncycastle.asn1.x509.KeyUsage; -import org.bouncycastle.openssl.jcajce.JcaPEMWriter; -import org.bouncycastle.operator.ContentSigner; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.bouncycastle.pkcs.PKCS10CertificationRequestBuilder; -import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder; -import org.bouncycastle.util.io.pem.PemObject; -import org.bouncycastle.util.io.pem.PemReader; - -import java.io.IOException; -import java.io.StringReader; -import java.io.StringWriter; -import java.security.KeyPair; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; - -/** - * A certificate sign request object that wraps operations to build a - * PKCS10CertificationRequest to CertificateServer. - */ -public final class CertificateSignRequest { - private final KeyPair keyPair; - private final SecurityConfig config; - private final Extensions extensions; - private String subject; - private String clusterID; - private String scmID; - - /** - * Private Ctor for CSR. 
- * - * @param subject - Subject - * @param scmID - SCM ID - * @param clusterID - Cluster ID - * @param keyPair - KeyPair - * @param config - SCM Config - * @param extensions - CSR extensions - */ - private CertificateSignRequest(String subject, String scmID, String clusterID, - KeyPair keyPair, SecurityConfig config, - Extensions extensions) { - this.subject = subject; - this.clusterID = clusterID; - this.scmID = scmID; - this.keyPair = keyPair; - this.config = config; - this.extensions = extensions; - } - - private PKCS10CertificationRequest generateCSR() throws - OperatorCreationException { - X500Name dnName = SecurityUtil.getDistinguishedName(subject, scmID, - clusterID); - PKCS10CertificationRequestBuilder p10Builder = - new JcaPKCS10CertificationRequestBuilder(dnName, keyPair.getPublic()); - - ContentSigner contentSigner = - new JcaContentSignerBuilder(config.getSignatureAlgo()) - .setProvider(config.getProvider()) - .build(keyPair.getPrivate()); - - if (extensions != null) { - p10Builder.addAttribute( - PKCSObjectIdentifiers.pkcs_9_at_extensionRequest, extensions); - } - return p10Builder.build(contentSigner); - } - public static String getEncodedString(PKCS10CertificationRequest request) - throws IOException { - PemObject pemObject = - new PemObject("CERTIFICATE REQUEST", request.getEncoded()); - StringWriter str = new StringWriter(); - try(JcaPEMWriter pemWriter = new JcaPEMWriter(str)) { - pemWriter.writeObject(pemObject); - } - return str.toString(); - } - - - /** - * Gets a CertificateRequest Object from PEM encoded CSR. - * - * @param csr - PEM Encoded Certificate Request String. - * @return PKCS10CertificationRequest - * @throws IOException - On Error. - */ - public static PKCS10CertificationRequest getCertificationRequest(String csr) - throws IOException { - try (PemReader reader = new PemReader(new StringReader(csr))) { - PemObject pemObject = reader.readPemObject(); - if(pemObject.getContent() == null) { - throw new SCMSecurityException("Invalid Certificate signing request"); - } - return new PKCS10CertificationRequest(pemObject.getContent()); - } - } - - /** - * Builder class for Certificate Sign Request. - */ - public static class Builder { - private String subject; - private String clusterID; - private String scmID; - private KeyPair key; - private SecurityConfig config; - private List altNames; - private Boolean ca = false; - private boolean digitalSignature; - private boolean digitalEncryption; - - public CertificateSignRequest.Builder setConfiguration( - Configuration configuration) { - this.config = new SecurityConfig(configuration); - return this; - } - - public CertificateSignRequest.Builder setKey(KeyPair keyPair) { - this.key = keyPair; - return this; - } - - public CertificateSignRequest.Builder setSubject(String subjectString) { - this.subject = subjectString; - return this; - } - - public CertificateSignRequest.Builder setClusterID(String s) { - this.clusterID = s; - return this; - } - - public CertificateSignRequest.Builder setScmID(String s) { - this.scmID = s; - return this; - } - - public Builder setDigitalSignature(boolean dSign) { - this.digitalSignature = dSign; - return this; - } - - public Builder setDigitalEncryption(boolean dEncryption) { - this.digitalEncryption = dEncryption; - return this; - } - - // Support SAN extenion with DNS and RFC822 Name - // other name type will be added as needed. 
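The CSR construction above reduces to a JcaPKCS10CertificationRequestBuilder plus a ContentSigner. A stripped-down self-contained sketch follows; the subject values and SHA256withRSA are placeholders, and the deleted builder additionally attached key-usage and subject-alternative-name extensions.

import java.io.StringWriter;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.openssl.jcajce.JcaPEMWriter;
import org.bouncycastle.operator.ContentSigner;
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
import org.bouncycastle.pkcs.PKCS10CertificationRequest;
import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder;
import org.bouncycastle.util.io.pem.PemObject;

public final class CsrSketch {
  public static void main(String[] args) throws Exception {
    KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
    gen.initialize(2048);
    KeyPair pair = gen.generateKeyPair();

    // Distinguished name follows the CN=Subject,OU=ScmID,O=ClusterID
    // convention used by the deleted code; the values are placeholders.
    X500Name subject = new X500Name("CN=om,OU=scm-1,O=cluster-1");

    ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA")
        .build(pair.getPrivate());
    PKCS10CertificationRequest csr =
        new JcaPKCS10CertificationRequestBuilder(subject, pair.getPublic())
            .build(signer);

    // PEM-encode the request, as getEncodedString(...) above does.
    StringWriter out = new StringWriter();
    try (JcaPEMWriter pemWriter = new JcaPEMWriter(out)) {
      pemWriter.writeObject(new PemObject("CERTIFICATE REQUEST", csr.getEncoded()));
    }
    System.out.println(out);
  }
}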
- public CertificateSignRequest.Builder addDnsName(String dnsName) { - Preconditions.checkNotNull(dnsName, "dnsName cannot be null"); - this.addAltName(GeneralName.dNSName, dnsName); - return this; - } - - // IP address is subject to change which is optional for now. - public CertificateSignRequest.Builder addIpAddress(String ip) { - Preconditions.checkNotNull(ip, "Ip address cannot be null"); - this.addAltName(GeneralName.iPAddress, ip); - return this; - } - - private CertificateSignRequest.Builder addAltName(int tag, String name) { - if (altNames == null) { - altNames = new ArrayList<>(); - } - altNames.add(new GeneralName(tag, name)); - return this; - } - - public CertificateSignRequest.Builder setCA(Boolean isCA) { - this.ca = isCA; - return this; - } - - private Extension getKeyUsageExtension() throws IOException { - int keyUsageFlag = KeyUsage.keyAgreement; - if(digitalEncryption){ - keyUsageFlag |= KeyUsage.keyEncipherment | KeyUsage.dataEncipherment; - } - if(digitalSignature) { - keyUsageFlag |= KeyUsage.digitalSignature; - } - - if (ca) { - keyUsageFlag |= KeyUsage.keyCertSign | KeyUsage.cRLSign; - } - KeyUsage keyUsage = new KeyUsage(keyUsageFlag); - return new Extension(Extension.keyUsage, true, - new DEROctetString(keyUsage)); - } - - private Optional getSubjectAltNameExtension() throws - IOException { - if (altNames != null) { - return Optional.of(new Extension(Extension.subjectAlternativeName, - false, new DEROctetString(new GeneralNames( - altNames.toArray(new GeneralName[altNames.size()]))))); - } - return Optional.empty(); - } - - private Extension getBasicExtension() throws IOException { - // We don't set pathLenConstraint means no limit is imposed. - return new Extension(Extension.basicConstraints, - true, new DEROctetString(new BasicConstraints(ca))); - } - - private Extensions createExtensions() throws IOException { - List extensions = new ArrayList<>(); - - // Add basic extension - if(ca) { - extensions.add(getBasicExtension()); - } - - // Add key usage extension - extensions.add(getKeyUsageExtension()); - - // Add subject alternate name extension - Optional san = getSubjectAltNameExtension(); - if (san.isPresent()) { - extensions.add(san.get()); - } - - return new Extensions( - extensions.toArray(new Extension[extensions.size()])); - } - - public PKCS10CertificationRequest build() throws SCMSecurityException { - Preconditions.checkNotNull(key, "KeyPair cannot be null"); - Preconditions.checkArgument(Strings.isNotBlank(subject), "Subject " + - "cannot be blank"); - - try { - CertificateSignRequest csr = new CertificateSignRequest(subject, scmID, - clusterID, key, config, createExtensions()); - return csr.generateCSR(); - } catch (IOException ioe) { - throw new CertificateException(String.format("Unable to create " + - "extension for certificate sign request for %s.", SecurityUtil - .getDistinguishedName(subject, scmID, clusterID)), ioe.getCause()); - } catch (OperatorCreationException ex) { - throw new CertificateException(String.format("Unable to create " + - "certificate sign request for %s.", SecurityUtil - .getDistinguishedName(subject, scmID, clusterID)), - ex.getCause()); - } - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java deleted file mode 100644 index 1fd6d7c9af640..0000000000000 --- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificates.utils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.apache.hadoop.util.Time; -import org.apache.logging.log4j.util.Strings; -import org.bouncycastle.asn1.DEROctetString; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x509.BasicConstraints; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.KeyUsage; -import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; -import org.bouncycastle.cert.CertIOException; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.X509v3CertificateBuilder; -import org.bouncycastle.operator.ContentSigner; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.KeyPair; -import java.time.Duration; -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.ZoneOffset; -import java.util.Date; - -/** - * A Self Signed Certificate with CertificateServer basic constraint can be used - * to bootstrap a certificate infrastructure, if no external certificate is - * provided. - */ -public final class SelfSignedCertificate { - private static final String NAME_FORMAT = "CN=%s,OU=%s,O=%s"; - private String subject; - private String clusterID; - private String scmID; - private LocalDate beginDate; - private LocalDate endDate; - private KeyPair key; - private SecurityConfig config; - - /** - * Private Ctor invoked only via Builder Interface. 
- * - * @param subject - Subject - * @param scmID - SCM ID - * @param clusterID - Cluster ID - * @param beginDate - NotBefore - * @param endDate - Not After - * @param configuration - SCM Config - * @param keyPair - KeyPair - */ - private SelfSignedCertificate(String subject, String scmID, String clusterID, - LocalDate beginDate, LocalDate endDate, SecurityConfig configuration, - KeyPair keyPair) { - this.subject = subject; - this.clusterID = clusterID; - this.scmID = scmID; - this.beginDate = beginDate; - this.endDate = endDate; - config = configuration; - this.key = keyPair; - } - - @VisibleForTesting - public static String getNameFormat() { - return NAME_FORMAT; - } - - public static Builder newBuilder() { - return new Builder(); - } - - private X509CertificateHolder generateCertificate(boolean isCA) - throws OperatorCreationException, IOException { - // For the Root Certificate we form the name from Subject, SCM ID and - // Cluster ID. - String dnName = String.format(getNameFormat(), subject, scmID, clusterID); - X500Name name = new X500Name(dnName); - byte[] encoded = key.getPublic().getEncoded(); - SubjectPublicKeyInfo publicKeyInfo = - SubjectPublicKeyInfo.getInstance(encoded); - - - ContentSigner contentSigner = - new JcaContentSignerBuilder(config.getSignatureAlgo()) - .setProvider(config.getProvider()).build(key.getPrivate()); - - // Please note: Since this is a root certificate we use "ONE" as the - // serial number. Also note that skip enforcing locale or UTC. We are - // trying to operate at the Days level, hence Time zone is also skipped for - // now. - BigInteger serial = BigInteger.ONE; - if (!isCA) { - serial = new BigInteger(Long.toString(Time.monotonicNow())); - } - - ZoneOffset zoneOffset = - beginDate.atStartOfDay(ZoneOffset.systemDefault()).getOffset(); - - // Valid from the Start of the day when we generate this Certificate. - Date validFrom = - Date.from(beginDate.atTime(LocalTime.MIN).toInstant(zoneOffset)); - - // Valid till end day finishes. - Date validTill = - Date.from(endDate.atTime(LocalTime.MAX).toInstant(zoneOffset)); - - X509v3CertificateBuilder builder = new X509v3CertificateBuilder(name, - serial, validFrom, validTill, name, publicKeyInfo); - - if (isCA) { - builder.addExtension(Extension.basicConstraints, true, - new BasicConstraints(true)); - int keyUsageFlag = KeyUsage.keyCertSign | KeyUsage.cRLSign; - KeyUsage keyUsage = new KeyUsage(keyUsageFlag); - builder.addExtension(Extension.keyUsage, false, - new DEROctetString(keyUsage)); - } - return builder.build(contentSigner); - } - - /** - * Builder class for Root Certificates. 
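generateCertificate above follows the standard BouncyCastle X509v3CertificateBuilder flow. Here is a compact self-contained version for reference; the DN, validity window and SHA256withRSA are illustrative, whereas the deleted code derived them from SecurityConfig and worked with day-granular dates.

import java.math.BigInteger;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.cert.X509Certificate;
import java.util.Date;
import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.asn1.x509.BasicConstraints;
import org.bouncycastle.asn1.x509.Extension;
import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo;
import org.bouncycastle.cert.X509CertificateHolder;
import org.bouncycastle.cert.X509v3CertificateBuilder;
import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
import org.bouncycastle.operator.ContentSigner;
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;

public final class SelfSignedCaSketch {
  public static void main(String[] args) throws Exception {
    KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
    gen.initialize(2048);
    KeyPair pair = gen.generateKeyPair();

    X500Name name = new X500Name("CN=scm,OU=scm-1,O=cluster-1"); // placeholder DN
    Date notBefore = new Date();
    Date notAfter = new Date(notBefore.getTime() + 365L * 24 * 60 * 60 * 1000);

    X509v3CertificateBuilder builder = new X509v3CertificateBuilder(
        name,                 // issuer == subject for a self-signed certificate
        BigInteger.ONE,       // the deleted code also used ONE for the root cert
        notBefore, notAfter,
        name,
        SubjectPublicKeyInfo.getInstance(pair.getPublic().getEncoded()));

    // Mark it as a CA, mirroring the isCA branch above.
    builder.addExtension(Extension.basicConstraints, true, new BasicConstraints(true));

    ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA")
        .build(pair.getPrivate());
    X509CertificateHolder holder = builder.build(signer);
    X509Certificate cert = new JcaX509CertificateConverter().getCertificate(holder);
    System.out.println(cert.getSubjectX500Principal());
  }
}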
- */ - public static class Builder { - private String subject; - private String clusterID; - private String scmID; - private LocalDate beginDate; - private LocalDate endDate; - private KeyPair key; - private SecurityConfig config; - private boolean isCA; - - public Builder setConfiguration(Configuration configuration) { - this.config = new SecurityConfig(configuration); - return this; - } - - public Builder setKey(KeyPair keyPair) { - this.key = keyPair; - return this; - } - - public Builder setSubject(String subjectString) { - this.subject = subjectString; - return this; - } - - public Builder setClusterID(String s) { - this.clusterID = s; - return this; - } - - public Builder setScmID(String s) { - this.scmID = s; - return this; - } - - public Builder setBeginDate(LocalDate date) { - this.beginDate = date; - return this; - } - - public Builder setEndDate(LocalDate date) { - this.endDate = date; - return this; - } - - public Builder makeCA() { - isCA = true; - return this; - } - - public X509CertificateHolder build() - throws SCMSecurityException, IOException { - Preconditions.checkNotNull(key, "Key cannot be null"); - Preconditions.checkArgument(Strings.isNotBlank(subject), "Subject " + - "cannot be blank"); - Preconditions.checkArgument(Strings.isNotBlank(clusterID), "Cluster ID " + - "cannot be blank"); - Preconditions.checkArgument(Strings.isNotBlank(scmID), "SCM ID cannot " + - "be blank"); - - Preconditions.checkArgument(beginDate.isBefore(endDate), "Certificate " + - "begin date should be before end date"); - - // We just read the beginDate and EndDate as Start of the Day and - // confirm that we do not violate the maxDuration Config. - Duration certDuration = Duration.between(beginDate.atStartOfDay(), - endDate.atStartOfDay()); - Duration maxDuration = config.getMaxCertificateDuration(); - if (certDuration.compareTo(maxDuration) > 0) { - throw new SCMSecurityException("The cert duration violates the " + - "maximum configured value. Please check the hdds.x509.max" + - ".duration config key. Current Value: " + certDuration + - " config: " + maxDuration); - } - - SelfSignedCertificate rootCertificate = - new SelfSignedCertificate(this.subject, - this.scmID, this.clusterID, this.beginDate, this.endDate, - this.config, key); - try { - return rootCertificate.generateCertificate(isCA); - } catch (OperatorCreationException | CertIOException e) { - throw new CertificateException("Unable to create root certificate.", - e.getCause()); - } - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/package-info.java deleted file mode 100644 index e7110e3125164..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - Helpers for Certificates. - */ -package org.apache.hadoop.hdds.security.x509.certificates.utils; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/CertificateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/CertificateException.java deleted file mode 100644 index b3121283b18eb..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/CertificateException.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.exceptions; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; - -/** - * Certificate Exceptions from the SCM Security layer. - */ -public class CertificateException extends SCMSecurityException { - - private ErrorCode errorCode; - /** - * Ctor. - * @param message - Error Message. - */ - public CertificateException(String message) { - super(message); - } - - /** - * Ctor. - * @param message - Message. - * @param cause - Actual cause. - */ - public CertificateException(String message, Throwable cause) { - super(message, cause); - } - - /** - * Ctor. - * @param message - Message. - * @param cause - Actual cause. - * @param errorCode - */ - public CertificateException(String message, Throwable cause, - ErrorCode errorCode) { - super(message, cause); - this.errorCode = errorCode; - } - - /** - * Ctor. - * @param message - Message. - * @param errorCode - */ - public CertificateException(String message, ErrorCode errorCode) { - super(message); - this.errorCode = errorCode; - } - - /** - * Ctor. - * @param cause - Base Exception. - */ - public CertificateException(Throwable cause) { - super(cause); - } - - /** - * Error codes to make it easy to decode these exceptions. 
- */ - public enum ErrorCode { - KEYSTORE_ERROR, - CRYPTO_SIGN_ERROR, - CERTIFICATE_ERROR, - BOOTSTRAP_ERROR, - CSR_ERROR, - CRYPTO_SIGNATURE_VERIFICATION_ERROR, - CERTIFICATE_NOT_FOUND_ERROR - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/package-info.java deleted file mode 100644 index afcc474ad111a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Exceptions thrown by X.509 security classes. - */ -package org.apache.hadoop.hdds.security.x509.exceptions; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java deleted file mode 100644 index 640f5ca0b9462..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.keys; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.security.KeyPair; -import java.security.KeyPairGenerator; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; - -/** - * A class to generate Key Pair for use with Certificates. - */ -public class HDDSKeyGenerator { - private static final Logger LOG = - LoggerFactory.getLogger(HDDSKeyGenerator.class); - private final SecurityConfig securityConfig; - - /** - * Constructor for HDDSKeyGenerator. 
- * - * @param configuration - config - */ - public HDDSKeyGenerator(Configuration configuration) { - this.securityConfig = new SecurityConfig(configuration); - } - - /** - * Constructor that takes a SecurityConfig as the Argument. - * - * @param config - SecurityConfig - */ - public HDDSKeyGenerator(SecurityConfig config) { - this.securityConfig = config; - } - - /** - * Returns the Security config used for this object. - * - * @return SecurityConfig - */ - public SecurityConfig getSecurityConfig() { - return securityConfig; - } - - /** - * Use Config to generate key. - * - * @return KeyPair - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - */ - public KeyPair generateKey() throws NoSuchProviderException, - NoSuchAlgorithmException { - return generateKey(securityConfig.getSize(), - securityConfig.getKeyAlgo(), securityConfig.getProvider()); - } - - /** - * Specify the size -- all other parameters are used from config. - * - * @param size - int, valid key sizes. - * @return KeyPair - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - */ - public KeyPair generateKey(int size) throws - NoSuchProviderException, NoSuchAlgorithmException { - return generateKey(size, - securityConfig.getKeyAlgo(), securityConfig.getProvider()); - } - - /** - * Custom Key Generation, all values are user provided. - * - * @param size - Key Size - * @param algorithm - Algorithm to use - * @param provider - Security provider. - * @return KeyPair. - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - */ - public KeyPair generateKey(int size, String algorithm, String provider) - throws NoSuchProviderException, NoSuchAlgorithmException { - if (LOG.isDebugEnabled()) { - LOG.debug("Generating key pair using size:{}, Algorithm:{}, Provider:{}", - size, algorithm, provider); - } - KeyPairGenerator generator = KeyPairGenerator - .getInstance(algorithm, provider); - generator.initialize(size); - return generator.generateKeyPair(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java deleted file mode 100644 index 82873b06c7146..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java +++ /dev/null @@ -1,398 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
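The generateKey overloads above ultimately delegate to the JCA KeyPairGenerator with a configurable size, algorithm and provider. A minimal equivalent that names the provider explicitly is sketched below; BouncyCastle, RSA and 2048 bits are assumptions here, since HDDSKeyGenerator read all three from SecurityConfig.

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.Security;
import org.bouncycastle.jce.provider.BouncyCastleProvider;

public final class KeyGenSketch {
  public static void main(String[] args) throws Exception {
    // Register BouncyCastle so it can be selected by name, in the same way the
    // deleted code selected its provider by the name stored in SecurityConfig.
    Security.addProvider(new BouncyCastleProvider());

    // Size, algorithm and provider are hard-coded purely for illustration.
    KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA", "BC");
    generator.initialize(2048);
    KeyPair pair = generator.generateKeyPair();
    System.out.println(pair.getPublic().getAlgorithm());
  }
}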
- * - */ -package org.apache.hadoop.hdds.security.x509.keys; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.output.FileWriterWithEncoding; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.bouncycastle.util.io.pem.PemObject; -import org.bouncycastle.util.io.pem.PemReader; -import org.bouncycastle.util.io.pem.PemWriter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.io.StringReader; -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; -import java.nio.file.FileSystems; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.PosixFilePermission; -import java.security.KeyFactory; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.spec.InvalidKeySpecException; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.X509EncodedKeySpec; -import java.util.Set; -import java.util.function.Supplier; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE; -import static java.nio.file.attribute.PosixFilePermission.OWNER_READ; -import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE; - -/** - * We store all Key material in good old PEM files. This helps in avoiding - * dealing will persistent Java KeyStore issues. Also when debugging, general - * tools like OpenSSL can be used to read and decode these files. - */ -public class KeyCodec { - public final static String PRIVATE_KEY = "PRIVATE KEY"; - public final static String PUBLIC_KEY = "PUBLIC KEY"; - public final static Charset DEFAULT_CHARSET = StandardCharsets.UTF_8; - private final static Logger LOG = - LoggerFactory.getLogger(KeyCodec.class); - private final Path location; - private final SecurityConfig securityConfig; - private Set permissionSet = - Stream.of(OWNER_READ, OWNER_WRITE, OWNER_EXECUTE) - .collect(Collectors.toSet()); - private Supplier isPosixFileSystem; - - /** - * Creates a KeyCodec with component name. - * - * @param config - Security Config. - * @param component - Component String. - */ - public KeyCodec(SecurityConfig config, String component) { - this.securityConfig = config; - isPosixFileSystem = KeyCodec::isPosix; - this.location = securityConfig.getKeyLocation(component); - } - - /** - * Checks if File System supports posix style security permissions. - * - * @return True if it supports posix. - */ - private static Boolean isPosix() { - return FileSystems.getDefault().supportedFileAttributeViews() - .contains("posix"); - } - - /** - * Returns the Permission set. - * - * @return Set - */ - @VisibleForTesting - public Set getPermissionSet() { - return permissionSet; - } - - /** - * Returns the Security config used for this object. - * - * @return SecurityConfig - */ - public SecurityConfig getSecurityConfig() { - return securityConfig; - } - - /** - * This function is used only for testing. - * - * @param isPosixFileSystem - Sets a boolean function for mimicking files - * systems that are not posix. 
- */ - @VisibleForTesting - public void setIsPosixFileSystem(Supplier isPosixFileSystem) { - this.isPosixFileSystem = isPosixFileSystem; - } - - /** - * Writes a given key using the default config options. - * - * @param keyPair - Key Pair to write to file. - * @throws IOException - On I/O failure. - */ - public void writeKey(KeyPair keyPair) throws IOException { - writeKey(location, keyPair, securityConfig.getPrivateKeyFileName(), - securityConfig.getPublicKeyFileName(), false); - } - - /** - * Writes a given private key using the default config options. - * - * @param key - Key to write to file. - * @throws IOException - On I/O failure. - */ - public void writePrivateKey(PrivateKey key) throws IOException { - File privateKeyFile = - Paths.get(location.toString(), - securityConfig.getPrivateKeyFileName()).toFile(); - - if (Files.exists(privateKeyFile.toPath())) { - throw new IOException("Private key already exist."); - } - - try (PemWriter privateKeyWriter = new PemWriter(new - FileWriterWithEncoding(privateKeyFile, DEFAULT_CHARSET))) { - privateKeyWriter.writeObject( - new PemObject(PRIVATE_KEY, key.getEncoded())); - } - Files.setPosixFilePermissions(privateKeyFile.toPath(), permissionSet); - } - - /** - * Writes a given public key using the default config options. - * - * @param key - Key to write to file. - * @throws IOException - On I/O failure. - */ - public void writePublicKey(PublicKey key) throws IOException { - File publicKeyFile = Paths.get(location.toString(), - securityConfig.getPublicKeyFileName()).toFile(); - - if (Files.exists(publicKeyFile.toPath())) { - throw new IOException("Private key already exist."); - } - - try (PemWriter keyWriter = new PemWriter(new - FileWriterWithEncoding(publicKeyFile, DEFAULT_CHARSET))) { - keyWriter.writeObject( - new PemObject(PUBLIC_KEY, key.getEncoded())); - } - Files.setPosixFilePermissions(publicKeyFile.toPath(), permissionSet); - } - - /** - * Writes a given key using default config options. - * - * @param keyPair - Key pair to write - * @param overwrite - Overwrites the keys if they already exist. - * @throws IOException - On I/O failure. - */ - public void writeKey(KeyPair keyPair, boolean overwrite) throws IOException { - writeKey(location, keyPair, securityConfig.getPrivateKeyFileName(), - securityConfig.getPublicKeyFileName(), overwrite); - } - - /** - * Writes a given key using default config options. - * - * @param basePath - The location to write to, override the config values. - * @param keyPair - Key pair to write - * @param overwrite - Overwrites the keys if they already exist. - * @throws IOException - On I/O failure. - */ - public void writeKey(Path basePath, KeyPair keyPair, boolean overwrite) - throws IOException { - writeKey(basePath, keyPair, securityConfig.getPrivateKeyFileName(), - securityConfig.getPublicKeyFileName(), overwrite); - } - - /** - * Reads a Private Key from the PEM Encoded Store. - * - * @param basePath - Base Path, Directory where the Key is stored. - * @param keyFileName - File Name of the private key - * @return PrivateKey Object. - * @throws IOException - on Error. 
- */ - private PKCS8EncodedKeySpec readKey(Path basePath, String keyFileName) - throws IOException { - File fileName = Paths.get(basePath.toString(), keyFileName).toFile(); - String keyData = FileUtils.readFileToString(fileName, DEFAULT_CHARSET); - final byte[] pemContent; - try (PemReader pemReader = new PemReader(new StringReader(keyData))) { - PemObject keyObject = pemReader.readPemObject(); - pemContent = keyObject.getContent(); - } - return new PKCS8EncodedKeySpec(pemContent); - } - - /** - * Returns a Private Key from a PEM encoded file. - * - * @param basePath - base path - * @param privateKeyFileName - private key file name. - * @return PrivateKey - * @throws InvalidKeySpecException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws IOException - on Error. - */ - public PrivateKey readPrivateKey(Path basePath, String privateKeyFileName) - throws InvalidKeySpecException, NoSuchAlgorithmException, IOException { - PKCS8EncodedKeySpec encodedKeySpec = readKey(basePath, privateKeyFileName); - final KeyFactory keyFactory = - KeyFactory.getInstance(securityConfig.getKeyAlgo()); - return - keyFactory.generatePrivate(encodedKeySpec); - } - - /** - * Read the Public Key using defaults. - * @return PublicKey. - * @throws InvalidKeySpecException - On Error. - * @throws NoSuchAlgorithmException - On Error. - * @throws IOException - On Error. - */ - public PublicKey readPublicKey() throws InvalidKeySpecException, - NoSuchAlgorithmException, IOException { - return readPublicKey(this.location.toAbsolutePath(), - securityConfig.getPublicKeyFileName()); - } - - /** - * Returns a public key from a PEM encoded file. - * - * @param basePath - base path. - * @param publicKeyFileName - public key file name. - * @return PublicKey - * @throws NoSuchAlgorithmException - on Error. - * @throws InvalidKeySpecException - on Error. - * @throws IOException - on Error. - */ - public PublicKey readPublicKey(Path basePath, String publicKeyFileName) - throws NoSuchAlgorithmException, InvalidKeySpecException, IOException { - PKCS8EncodedKeySpec encodedKeySpec = readKey(basePath, publicKeyFileName); - final KeyFactory keyFactory = - KeyFactory.getInstance(securityConfig.getKeyAlgo()); - return - keyFactory.generatePublic( - new X509EncodedKeySpec(encodedKeySpec.getEncoded())); - - } - - - /** - * Returns the private key using defaults. - * @return PrivateKey. - * @throws InvalidKeySpecException - On Error. - * @throws NoSuchAlgorithmException - On Error. - * @throws IOException - On Error. - */ - public PrivateKey readPrivateKey() throws InvalidKeySpecException, - NoSuchAlgorithmException, IOException { - return readPrivateKey(this.location.toAbsolutePath(), - securityConfig.getPrivateKeyFileName()); - } - - - /** - * Helper function that actually writes data to the files. - * - * @param basePath - base path to write key - * @param keyPair - Key pair to write to file. - * @param privateKeyFileName - private key file name. - * @param publicKeyFileName - public key file name. - * @param force - forces overwriting the keys. - * @throws IOException - On I/O failure. 
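The write and read paths above are a plain PEM round trip over the PKCS#8 (private key) and X.509 SubjectPublicKeyInfo (public key) encodings. Below is an in-memory sketch of the private-key half using the same BouncyCastle PemWriter/PemReader types; RSA is an illustrative choice, and unlike KeyCodec this skips the file handling and owner-only permissions.

import java.io.StringReader;
import java.io.StringWriter;
import java.security.KeyFactory;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.PrivateKey;
import java.security.spec.PKCS8EncodedKeySpec;
import java.util.Arrays;
import org.bouncycastle.util.io.pem.PemObject;
import org.bouncycastle.util.io.pem.PemReader;
import org.bouncycastle.util.io.pem.PemWriter;

public final class KeyPemSketch {
  public static void main(String[] args) throws Exception {
    KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
    gen.initialize(2048);
    KeyPair pair = gen.generateKeyPair();

    // Write the private key as a "PRIVATE KEY" PEM block (in memory here).
    StringWriter out = new StringWriter();
    try (PemWriter writer = new PemWriter(out)) {
      writer.writeObject(new PemObject("PRIVATE KEY",
          pair.getPrivate().getEncoded()));
    }
    String pem = out.toString();

    // Read it back: PEM -> PKCS#8 key spec -> PrivateKey.
    byte[] content;
    try (PemReader reader = new PemReader(new StringReader(pem))) {
      content = reader.readPemObject().getContent();
    }
    PrivateKey restored = KeyFactory.getInstance("RSA")
        .generatePrivate(new PKCS8EncodedKeySpec(content));

    System.out.println("round-trip ok: "
        + Arrays.equals(restored.getEncoded(), pair.getPrivate().getEncoded()));
  }
}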
- */ - private synchronized void writeKey(Path basePath, KeyPair keyPair, - String privateKeyFileName, String publicKeyFileName, boolean force) - throws IOException { - checkPreconditions(basePath); - - File privateKeyFile = - Paths.get(location.toString(), privateKeyFileName).toFile(); - File publicKeyFile = - Paths.get(location.toString(), publicKeyFileName).toFile(); - checkKeyFile(privateKeyFile, force, publicKeyFile); - - try (PemWriter privateKeyWriter = new PemWriter(new - FileWriterWithEncoding(privateKeyFile, DEFAULT_CHARSET))) { - privateKeyWriter.writeObject( - new PemObject(PRIVATE_KEY, keyPair.getPrivate().getEncoded())); - } - - try (PemWriter publicKeyWriter = new PemWriter(new - FileWriterWithEncoding(publicKeyFile, DEFAULT_CHARSET))) { - publicKeyWriter.writeObject( - new PemObject(PUBLIC_KEY, keyPair.getPublic().getEncoded())); - } - Files.setPosixFilePermissions(privateKeyFile.toPath(), permissionSet); - Files.setPosixFilePermissions(publicKeyFile.toPath(), permissionSet); - } - - /** - * Checks if private and public key file already exists. Throws IOException if - * file exists and force flag is set to false, else will delete the existing - * file. - * - * @param privateKeyFile - Private key file. - * @param force - forces overwriting the keys. - * @param publicKeyFile - public key file. - * @throws IOException - On I/O failure. - */ - private void checkKeyFile(File privateKeyFile, boolean force, - File publicKeyFile) throws IOException { - if (privateKeyFile.exists() && force) { - if (!privateKeyFile.delete()) { - throw new IOException("Unable to delete private key file."); - } - } - - if (publicKeyFile.exists() && force) { - if (!publicKeyFile.delete()) { - throw new IOException("Unable to delete public key file."); - } - } - - if (privateKeyFile.exists()) { - throw new IOException("Private Key file already exists."); - } - - if (publicKeyFile.exists()) { - throw new IOException("Public Key file already exists."); - } - } - - /** - * Checks if base path exists and sets file permissions. - * - * @param basePath - base path to write key - * @throws IOException - On I/O failure. - */ - private void checkPreconditions(Path basePath) throws IOException { - Preconditions.checkNotNull(basePath, "Base path cannot be null"); - if (!isPosixFileSystem.get()) { - LOG.error("Keys cannot be stored securely without POSIX file system " - + "support for now."); - throw new IOException("Unsupported File System for pem file."); - } - - if (Files.exists(basePath)) { - // Not the end of the world if we reset the permissions on an existing - // directory. - Files.setPosixFilePermissions(basePath, permissionSet); - } else { - boolean success = basePath.toFile().mkdirs(); - if (!success) { - LOG.error("Unable to create the directory for the " - + "location. Location: {}", basePath); - throw new IOException("Unable to create the directory for the " - + "location. Location:" + basePath); - } - Files.setPosixFilePermissions(basePath, permissionSet); - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java deleted file mode 100644 index 6147d3a990122..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.keys; - -import java.security.KeyFactory; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.spec.InvalidKeySpecException; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.X509EncodedKeySpec; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.bouncycastle.asn1.ASN1ObjectIdentifier; -import org.bouncycastle.asn1.ASN1Sequence; -import org.bouncycastle.asn1.ASN1Set; -import org.bouncycastle.asn1.pkcs.Attribute; -import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x509.Extensions; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; - -/** - * Utility functions for Security modules for Ozone. - */ -public final class SecurityUtil { - - // Ozone Certificate distinguished format: (CN=Subject,OU=ScmID,O=ClusterID). - private static final String DISTINGUISHED_NAME_FORMAT = "CN=%s,OU=%s,O=%s"; - - private SecurityUtil() { - } - - public static String getDistinguishedNameFormat() { - return DISTINGUISHED_NAME_FORMAT; - } - - public static X500Name getDistinguishedName(String subject, String scmID, - String clusterID) { - return new X500Name(String.format(getDistinguishedNameFormat(), subject, - scmID, clusterID)); - } - - // TODO: move the PKCS10CSRValidator class - public static Extensions getPkcs9Extensions(PKCS10CertificationRequest csr) - throws CertificateException { - ASN1Set pkcs9ExtReq = getPkcs9ExtRequest(csr); - Object extReqElement = pkcs9ExtReq.getObjects().nextElement(); - if (extReqElement instanceof Extensions) { - return (Extensions) extReqElement; - } else { - if (extReqElement instanceof ASN1Sequence) { - return Extensions.getInstance((ASN1Sequence) extReqElement); - } else { - throw new CertificateException("Unknown element type :" + extReqElement - .getClass().getSimpleName()); - } - } - } - - public static ASN1Set getPkcs9ExtRequest(PKCS10CertificationRequest csr) - throws CertificateException { - for (Attribute attr : csr.getAttributes()) { - ASN1ObjectIdentifier oid = attr.getAttrType(); - if (oid.equals(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) { - return attr.getAttrValues(); - } - } - throw new CertificateException("No PKCS#9 extension found in CSR"); - } - - /* - * Returns private key created from encoded key. - * @return private key if successful else returns null. 
- */ - public static PrivateKey getPrivateKey(byte[] encodedKey, - SecurityConfig secureConfig) { - PrivateKey pvtKey = null; - if (encodedKey == null || encodedKey.length == 0) { - return null; - } - - try { - KeyFactory kf = null; - - kf = KeyFactory.getInstance(secureConfig.getKeyAlgo(), - secureConfig.getProvider()); - pvtKey = kf.generatePrivate(new PKCS8EncodedKeySpec(encodedKey)); - - } catch (NoSuchAlgorithmException | InvalidKeySpecException | - NoSuchProviderException e) { - return null; - } - return pvtKey; - } - - /* - * Returns public key created from encoded key. - * @return public key if successful else returns null. - */ - public static PublicKey getPublicKey(byte[] encodedKey, - SecurityConfig secureConfig) { - PublicKey key = null; - if (encodedKey == null || encodedKey.length == 0) { - return null; - } - - try { - KeyFactory kf = null; - kf = KeyFactory.getInstance(secureConfig.getKeyAlgo(), - secureConfig.getProvider()); - key = kf.generatePublic(new X509EncodedKeySpec(encodedKey)); - - } catch (NoSuchAlgorithmException | InvalidKeySpecException | - NoSuchProviderException e) { - return null; - } - return key; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java deleted file mode 100644 index 37a04d6c084b5..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Utils for private and public keys. - */ -package org.apache.hadoop.hdds.security.x509.keys; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java deleted file mode 100644 index a6369c68308f0..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - - -/** - * This package contains common routines used in creating an x509 based identity - * framework for HDDS. - */ -package org.apache.hadoop.hdds.security.x509; -/* - -Architecture of Certificate Infrastructure for SCM. -==================================================== - -The certificate infrastructure has two main parts, the certificate server or -the Certificate authority and the clients who want certificates. The CA is -responsible for issuing certificates to participating entities. - -To issue a certificate the CA has to verify the identity and the assertions -in the certificate. The client starts off making a request to CA for a -certificate. This request is called Certificate Signing Request or CSR -(PKCS#10). - -When a CSR arrives on the CA, CA will decode the CSR and verify that all the -fields in the CSR are in line with what the system expects. Since there are -lots of possible ways to construct an X.509 certificate, we rely on PKI -profiles. - -Generally, PKI profiles are policy documents or general guidelines that get -followed by the requester and CA. However, most of the PKI profiles that are -commonly available are general purpose and offers too much surface area. - -SCM CA infrastructure supports the notion of a PKI profile class which can -codify the RDNs, Extensions and other certificate policies. The CA when -issuing a certificate will invoke a certificate approver class, based on the -authentication method used. For example, out of the box, we support manual, -Kerberos, trusted network and testing authentication mechanisms. - -If there is no authentication mechanism in place, then when CA receives the -CSR, it runs the standard PKI profile over it verify that all the fields are -in expected ranges. Once that is done, The signing request is sent for human -review and approval. This form of certificate approval is called Manual, Of -all the certificate approval process this is the ** most secure **. This -approval needs to be done once for each data node. - -For existing clusters, where data nodes already have a Kerberos keytab, we -can leverage the Kerberos identity mechanism to identify the data node that -is requesting the certificate. In this case, users can configure the system -to leverage Kerberos while issuing certificates and SCM CA will be able to -verify the data nodes identity and issue certificates automatically. - -In environments like Kubernetes, we can leverage the base system services to -pass on a shared secret securely. In this model also, we can rely on these -secrets to make sure that is the right data node that is talking to us. This -kind of approval is called a Trusted network approval. In this process, each -data node not only sends the CSR but signs the request with a shared secret -with SCM. SCM then can issue a certificate without the intervention of a -human administrator. - -The last, TESTING method which never should be used other than in development - and testing clusters, is merely a mechanism to bypass all identity checks. If -this flag is setup, then CA will issue a CSR if the base approves all fields. 
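The notes above describe clients submitting a PKCS#10 certificate signing request that the SCM CA validates against a PKI profile before approval. A hypothetical sketch of building such a CSR with the BouncyCastle PKIX APIs, following the CN=Subject,OU=ScmID,O=ClusterID convention from the removed SecurityUtil; the key size, signature algorithm, and subject values are illustrative assumptions, not the removed code's actual request builder:

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;

    import org.bouncycastle.asn1.x500.X500Name;
    import org.bouncycastle.operator.ContentSigner;
    import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
    import org.bouncycastle.pkcs.PKCS10CertificationRequest;
    import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder;

    public final class CsrSketch {
      public static void main(String[] args) throws Exception {
        KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
        generator.initialize(2048);
        KeyPair keyPair = generator.generateKeyPair();

        // Follows the CN=Subject,OU=ScmID,O=ClusterID convention described
        // above; the concrete values are made up for illustration.
        X500Name subject = new X500Name("CN=datanode-1,OU=scm-1,O=cluster-1");

        ContentSigner signer =
            new JcaContentSignerBuilder("SHA256withRSA").build(keyPair.getPrivate());
        PKCS10CertificationRequest csr =
            new JcaPKCS10CertificationRequestBuilder(subject, keyPair.getPublic())
                .build(signer);

        // The CA side would decode this request and check it against a PKI profile.
        System.out.println("CSR subject: " + csr.getSubject());
      }
    }

On the CA side, the removed SecurityUtil.getPkcs9Extensions shows how the extension-request attribute is pulled back out of such a request.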
- - * Please do not use this mechanism(TESTING) for any purpose other than - * testing. - -CA - Certificate Approval and Code Layout (as of Dec, 1st, 2018) -================================================================= -The CA implementation ( as of now it is called DefaultCA) receives a CSR from - the network layer. The network also tells the system what approver type to - use, that is if Kerberos or Shared secrets mechanism is used, it reports - that to Default CA. - -The default CA instantiates the approver based on the type of the approver -indicated by the network layer. This approver creates an instance of the PKI -profile and passes each field from the certificate signing request. The PKI -profile (as of today Dec 1st, 2018, we have one profile called Ozone profile) - verifies that each field in the CSR meets the approved set of values. - -Once the PKI Profile validates the request, it is either auto approved or -queued for manual review. - - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcClientInterceptor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcClientInterceptor.java deleted file mode 100644 index 58270baabcc0e..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcClientInterceptor.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import org.apache.ratis.thirdparty.io.grpc.CallOptions; -import org.apache.ratis.thirdparty.io.grpc.Channel; -import org.apache.ratis.thirdparty.io.grpc.ClientCall; -import org.apache.ratis.thirdparty.io.grpc.ClientInterceptor; -import org.apache.ratis.thirdparty.io.grpc.ForwardingClientCall.SimpleForwardingClientCall; -import org.apache.ratis.thirdparty.io.grpc.Metadata; -import org.apache.ratis.thirdparty.io.grpc.Metadata.Key; -import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor; - -/** - * Interceptor to add the tracing id to the outgoing call header. - */ -public class GrpcClientInterceptor implements ClientInterceptor { - - public static final Key TRACING_HEADER = - Key.of("Tracing", Metadata.ASCII_STRING_MARSHALLER); - - @Override - public ClientCall interceptCall( - MethodDescriptor method, CallOptions callOptions, - Channel next) { - - return new SimpleForwardingClientCall( - next.newCall(method, callOptions)) { - - @Override - public void start(Listener responseListener, Metadata headers) { - - Metadata tracingHeaders = new Metadata(); - tracingHeaders.put(TRACING_HEADER, TracingUtil.exportCurrentSpan()); - - headers.merge(tracingHeaders); - - super.start(responseListener, headers); - } - }; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java deleted file mode 100644 index b63af12b3fab7..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import io.opentracing.Scope; -import org.apache.ratis.thirdparty.io.grpc.ForwardingServerCallListener.SimpleForwardingServerCallListener; -import org.apache.ratis.thirdparty.io.grpc.Metadata; -import org.apache.ratis.thirdparty.io.grpc.ServerCall; -import org.apache.ratis.thirdparty.io.grpc.ServerCall.Listener; -import org.apache.ratis.thirdparty.io.grpc.ServerCallHandler; -import org.apache.ratis.thirdparty.io.grpc.ServerInterceptor; - -/** - * Interceptor to add the tracing id to the outgoing call header. - */ -public class GrpcServerInterceptor implements ServerInterceptor { - - @Override - public Listener interceptCall( - ServerCall call, Metadata headers, - ServerCallHandler next) { - - return new SimpleForwardingServerCallListener( - next.startCall(call, headers)) { - @Override - public void onMessage(ReqT message) { - try (Scope scope = TracingUtil - .importAndCreateScope( - call.getMethodDescriptor().getFullMethodName(), - headers.get(GrpcClientInterceptor.TRACING_HEADER))) { - super.onMessage(message); - } - } - }; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java deleted file mode 100644 index 56d59ea6f1a32..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import java.math.BigInteger; - -import io.jaegertracing.internal.JaegerSpanContext; -import io.jaegertracing.internal.exceptions.EmptyTracerStateStringException; -import io.jaegertracing.internal.exceptions.MalformedTracerStateStringException; -import io.jaegertracing.internal.exceptions.TraceIdOutOfBoundException; -import io.jaegertracing.spi.Codec; -import io.opentracing.propagation.Format; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A jaeger codec to save the current tracing context as a string. - */ -public class StringCodec implements Codec { - - public static final Logger LOG = LoggerFactory.getLogger(StringCodec.class); - public static final StringFormat FORMAT = new StringFormat(); - - @Override - public JaegerSpanContext extract(StringBuilder s) { - if (s == null) { - throw new EmptyTracerStateStringException(); - } - String value = s.toString(); - if (value != null && !value.equals("")) { - String[] parts = value.split(":"); - if (parts.length != 4) { - if (LOG.isDebugEnabled()) { - LOG.debug("MalformedTracerStateString: {}", value); - } - throw new MalformedTracerStateStringException(value); - } else { - String traceId = parts[0]; - if (traceId.length() <= 32 && traceId.length() >= 1) { - return new JaegerSpanContext(high(traceId), - (new BigInteger(traceId, 16)).longValue(), - (new BigInteger(parts[1], 16)).longValue(), - (new BigInteger(parts[2], 16)).longValue(), - (new BigInteger(parts[3], 16)).byteValue()); - } else { - throw new TraceIdOutOfBoundException( - "Trace id [" + traceId + "] length is not withing 1 and 32"); - } - } - } else { - throw new EmptyTracerStateStringException(); - } - } - - @Override - public void inject(JaegerSpanContext context, - StringBuilder string) { - int intFlag = context.getFlags() & 255; - string.append( - context.getTraceId() + ":" + Long.toHexString(context.getSpanId()) - + ":" + Long.toHexString(context.getParentId()) + ":" + Integer - .toHexString(intFlag)); - } - - private static long high(String hexString) { - if (hexString.length() > 16) { - int highLength = hexString.length() - 16; - String highString = hexString.substring(0, highLength); - return (new BigInteger(highString, 16)).longValue(); - } else { - return 0L; - } - } - - /** - * The format to save the context as text. - *

- * Using the mutable StringBuilder instead of plain String. - */ - public static final class StringFormat implements Format { - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java deleted file mode 100644 index 8bdf638acfc91..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; - -import io.opentracing.Scope; -import io.opentracing.util.GlobalTracer; - -/** - * A Java proxy invocation handler to trace all the methods of the delegate - * class. - * - * @param - */ -public class TraceAllMethod implements InvocationHandler { - - /** - * Cache for all the method objects of the delegate class. - */ - private final Map[], Method>> methods = new HashMap<>(); - - private T delegate; - - private String name; - - public TraceAllMethod(T delegate, String name) { - this.delegate = delegate; - this.name = name; - for (Method method : delegate.getClass().getDeclaredMethods()) { - if (!methods.containsKey(method.getName())) { - methods.put(method.getName(), new HashMap<>()); - } - methods.get(method.getName()).put(method.getParameterTypes(), method); - } - } - - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws Throwable { - Method delegateMethod = findDelegatedMethod(method); - try (Scope scope = GlobalTracer.get().buildSpan( - name + "." + method.getName()) - .startActive(true)) { - try { - return delegateMethod.invoke(delegate, args); - } catch (Exception ex) { - if (ex.getCause() != null) { - throw ex.getCause(); - } else { - throw ex; - } - } - } - } - - private Method findDelegatedMethod(Method method) { - for (Entry[], Method> entry : methods.get(method.getName()) - .entrySet()) { - if (Arrays.equals(entry.getKey(), method.getParameterTypes())) { - return entry.getValue(); - } - } - return null; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java deleted file mode 100644 index 8e82a375abb45..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import java.lang.reflect.Proxy; - -import io.jaegertracing.Configuration; -import io.jaegertracing.internal.JaegerTracer; -import io.opentracing.Scope; -import io.opentracing.Span; -import io.opentracing.SpanContext; -import io.opentracing.Tracer; -import io.opentracing.util.GlobalTracer; - -import org.apache.hadoop.hdds.scm.ScmConfigKeys; - -/** - * Utility class to collect all the tracing helper methods. - */ -public final class TracingUtil { - - private static final String NULL_SPAN_AS_STRING = ""; - - private TracingUtil() { - } - - /** - * Initialize the tracing with the given service name. - * - * @param serviceName - */ - public static void initTracing(String serviceName) { - if (!GlobalTracer.isRegistered()) { - Configuration config = Configuration.fromEnv(serviceName); - JaegerTracer tracer = config.getTracerBuilder() - .registerExtractor(StringCodec.FORMAT, new StringCodec()) - .registerInjector(StringCodec.FORMAT, new StringCodec()) - .build(); - GlobalTracer.register(tracer); - } - } - - /** - * Export the active tracing span as a string. - * - * @return encoded tracing context. - */ - public static String exportCurrentSpan() { - if (GlobalTracer.get().activeSpan() != null) { - StringBuilder builder = new StringBuilder(); - GlobalTracer.get().inject(GlobalTracer.get().activeSpan().context(), - StringCodec.FORMAT, builder); - return builder.toString(); - } - return NULL_SPAN_AS_STRING; - } - - /** - * Export the specific span as a string. - * - * @return encoded tracing context. - */ - public static String exportSpan(Span span) { - if (span != null) { - StringBuilder builder = new StringBuilder(); - GlobalTracer.get().inject(span.context(), StringCodec.FORMAT, builder); - return builder.toString(); - } - return NULL_SPAN_AS_STRING; - } - - /** - * Create a new scope and use the imported span as the parent. - * - * @param name name of the newly created scope - * @param encodedParent Encoded parent span (could be null or empty) - * - * @return OpenTracing scope. - */ - public static Scope importAndCreateScope(String name, String encodedParent) { - Tracer.SpanBuilder spanBuilder; - Tracer tracer = GlobalTracer.get(); - SpanContext parentSpan = null; - if (encodedParent != null && encodedParent.length() > 0) { - StringBuilder builder = new StringBuilder(); - builder.append(encodedParent); - parentSpan = tracer.extract(StringCodec.FORMAT, builder); - - } - - if (parentSpan == null) { - spanBuilder = tracer.buildSpan(name); - } else { - spanBuilder = - tracer.buildSpan(name).asChildOf(parentSpan); - } - return spanBuilder.startActive(true); - } - - /** - * Creates a proxy of the implementation and trace all the method calls. - * - * @param delegate the original class instance - * @param interfce the interface which should be implemented by the proxy - * @param the type of the interface - * @param conf configuration - * - * @return A new interface which implements interfce but delegate all the - * calls to the delegate and also enables tracing. 
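The export/import helpers documented above are the whole propagation contract: the caller serializes its active span to a string, ships it in a request header, and the receiver rebuilds a child scope from it. A small sketch using the removed TracingUtil as-is; the service and span names are made up, and the RPC transport between the two halves is omitted:

    import io.opentracing.Scope;
    import org.apache.hadoop.hdds.tracing.TracingUtil;

    public final class TracePropagationSketch {
      public static void main(String[] args) {
        TracingUtil.initTracing("sketch-service");

        // Caller side: serialize the active span (empty string when none is
        // active) so it can travel in an RPC header.
        String encodedParent = TracingUtil.exportCurrentSpan();

        // Receiver side: rebuild a child scope from the encoded parent so both
        // halves of the call show up in the same trace.
        try (Scope scope = TracingUtil.importAndCreateScope(
            "ContainerProtocol.call", encodedParent)) {
          // ... handle the request inside the imported trace context ...
        }
      }
    }

The GrpcClientInterceptor and GrpcServerInterceptor removed earlier do exactly this over gRPC metadata, and createProxy wires TraceAllMethod around an interface so every method call gets its own span.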
- */ - public static T createProxy(T delegate, Class interfce, - org.apache.hadoop.conf.Configuration conf) { - boolean isTracingEnabled = conf.getBoolean( - ScmConfigKeys.HDDS_TRACING_ENABLED, - ScmConfigKeys.HDDS_TRACING_ENABLED_DEFAULT); - if (!isTracingEnabled) { - return delegate; - } - Class aClass = delegate.getClass(); - return (T) Proxy.newProxyInstance(aClass.getClassLoader(), - new Class[] {interfce}, - new TraceAllMethod(delegate, interfce.getSimpleName())); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java deleted file mode 100644 index 3ead03b6f6f80..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.tracing; - -/** - * Helper classes to use distributed tracing in Ozone components. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java deleted file mode 100644 index ca8d87053f7fc..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorCompletionService; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * An abstract class for a background service in ozone. - * A background service schedules multiple child tasks in parallel - * in a certain period. In each interval, it waits until all the tasks - * finish execution and then schedule next interval. - */ -public abstract class BackgroundService { - - @VisibleForTesting - public static final Logger LOG = - LoggerFactory.getLogger(BackgroundService.class); - - // Executor to launch child tasks - private final ScheduledExecutorService exec; - private final ThreadGroup threadGroup; - private final ThreadFactory threadFactory; - private final String serviceName; - private final long interval; - private final long serviceTimeout; - private final TimeUnit unit; - private final PeriodicalTask service; - - public BackgroundService(String serviceName, long interval, - TimeUnit unit, int threadPoolSize, long serviceTimeout) { - this.interval = interval; - this.unit = unit; - this.serviceName = serviceName; - this.serviceTimeout = serviceTimeout; - threadGroup = new ThreadGroup(serviceName); - ThreadFactory tf = r -> new Thread(threadGroup, r); - threadFactory = new ThreadFactoryBuilder() - .setThreadFactory(tf) - .setDaemon(true) - .setNameFormat(serviceName + "#%d") - .build(); - exec = Executors.newScheduledThreadPool(threadPoolSize, threadFactory); - service = new PeriodicalTask(); - } - - protected ExecutorService getExecutorService() { - return this.exec; - } - - @VisibleForTesting - public int getThreadCount() { - return threadGroup.activeCount(); - } - - @VisibleForTesting - public void triggerBackgroundTaskForTesting() { - service.run(); - } - - // start service - public void start() { - exec.scheduleWithFixedDelay(service, 0, interval, unit); - } - - public abstract BackgroundTaskQueue getTasks(); - - /** - * Run one or more background tasks concurrently. - * Wait until all tasks to return the result. - */ - public class PeriodicalTask implements Runnable { - @Override - public synchronized void run() { - if (LOG.isDebugEnabled()) { - LOG.debug("Running background service : {}", serviceName); - } - BackgroundTaskQueue tasks = getTasks(); - if (tasks.isEmpty()) { - // No task found, or some problems to init tasks - // return and retry in next interval. 
- return; - } - if (LOG.isDebugEnabled()) { - LOG.debug("Number of background tasks to execute : {}", tasks.size()); - } - CompletionService taskCompletionService = - new ExecutorCompletionService<>(exec); - - List> results = Lists.newArrayList(); - while (tasks.size() > 0) { - BackgroundTask task = tasks.poll(); - Future result = - taskCompletionService.submit(task); - results.add(result); - } - - results.parallelStream().forEach(taskResultFuture -> { - try { - // Collect task results - BackgroundTaskResult result = serviceTimeout > 0 - ? taskResultFuture.get(serviceTimeout, unit) - : taskResultFuture.get(); - if (LOG.isDebugEnabled()) { - LOG.debug("task execution result size {}", result.getSize()); - } - } catch (InterruptedException | ExecutionException e) { - LOG.warn( - "Background task fails to execute, " - + "retrying in next interval", e); - } catch (TimeoutException e) { - LOG.warn("Background task executes timed out, " - + "retrying in next interval", e); - } - }); - } - } - - // shutdown and make sure all threads are properly released. - public void shutdown() { - LOG.info("Shutting down service {}", this.serviceName); - exec.shutdown(); - try { - if (!exec.awaitTermination(60, TimeUnit.SECONDS)) { - exec.shutdownNow(); - } - } catch (InterruptedException e) { - exec.shutdownNow(); - } - if (threadGroup.activeCount() == 0 && !threadGroup.isDestroyed()) { - threadGroup.destroy(); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java deleted file mode 100644 index d5ad2a394dd3a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils; - -import java.util.concurrent.Callable; - -/** - * A task thread to run by {@link BackgroundService}. - */ -public interface BackgroundTask extends Callable { - - int getPriority(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskQueue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskQueue.java deleted file mode 100644 index 005d14b8e3cc6..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskQueue.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils; - -import java.util.PriorityQueue; - -/** - * A priority queue that stores a number of {@link BackgroundTask}. - */ -public class BackgroundTaskQueue { - - private final PriorityQueue tasks; - - public BackgroundTaskQueue() { - tasks = new PriorityQueue<>((task1, task2) - -> task1.getPriority() - task2.getPriority()); - } - - /** - * @return the head task in this queue. - */ - public synchronized BackgroundTask poll() { - return tasks.poll(); - } - - /** - * Add a {@link BackgroundTask} to the queue, - * the task will be sorted by its priority. - * - * @param task - */ - public synchronized void add(BackgroundTask task) { - tasks.add(task); - } - - /** - * @return true if the queue contains no task, false otherwise. - */ - public synchronized boolean isEmpty() { - return tasks.isEmpty(); - } - - /** - * @return the size of the queue. - */ - public synchronized int size() { - return tasks.size(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskResult.java deleted file mode 100644 index be8032b06a3e2..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskResult.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils; - -/** - * Result of a {@link BackgroundTask}. - */ -public interface BackgroundTaskResult { - - /** - * Returns the size of entries included in this result. - */ - int getSize(); - - /** - * An empty task result implementation. - */ - class EmptyTaskResult implements BackgroundTaskResult { - - public static EmptyTaskResult newResult() { - return new EmptyTaskResult(); - } - - @Override - public int getSize() { - return 0; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java deleted file mode 100644 index 377c7f6a1a868..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import com.google.common.collect.Lists; - -import java.util.List; - -/** - * An utility class to store a batch of DB write operations. - */ -public class BatchOperation { - - /** - * Enum for write operations. - */ - public enum Operation { - DELETE, PUT - } - - private List operations = - Lists.newArrayList(); - - /** - * Add a PUT operation into the batch. - */ - public void put(byte[] key, byte[] value) { - operations.add(new SingleOperation(Operation.PUT, key, value)); - } - - /** - * Add a DELETE operation into the batch. - */ - public void delete(byte[] key) { - operations.add(new SingleOperation(Operation.DELETE, key, null)); - - } - - public List getOperations() { - return operations; - } - - /** - * A SingleOperation represents a PUT or DELETE operation - * and the data the operation needs to manipulates. - */ - public static class SingleOperation { - - private Operation opt; - private byte[] key; - private byte[] value; - - public SingleOperation(Operation opt, byte[] key, byte[] value) { - this.opt = opt; - if (key == null) { - throw new IllegalArgumentException("key cannot be null"); - } - this.key = key.clone(); - this.value = value == null ? null : value.clone(); - } - - public Operation getOpt() { - return opt; - } - - public byte[] getKey() { - return key.clone(); - } - - public byte[] getValue() { - return value == null ? 
null : value.clone(); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/EntryConsumer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/EntryConsumer.java deleted file mode 100644 index dc08c2bd63c8a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/EntryConsumer.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import java.io.IOException; - -/** - * A consumer for metadata store key-value entries. - * Used by {@link MetadataStore} class. - */ -@FunctionalInterface -public interface EntryConsumer { - - /** - * Consumes a key and value and produces a boolean result. - * @param key key - * @param value value - * @return a boolean value produced by the consumer - * @throws IOException - */ - boolean consume(byte[] key, byte[] value) throws IOException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java deleted file mode 100644 index 6a372d123712c..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.util.ClassUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class returns build information about Hadoop components. 
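BatchOperation, removed just above, is only a staging list of PUT and DELETE entries; a store implementation later applies the whole list atomically in writeBatch. A brief sketch of building and inspecting a batch, with made-up keys and values:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hdds.utils.BatchOperation;

    public final class BatchOperationSketch {
      public static void main(String[] args) {
        BatchOperation batch = new BatchOperation();
        batch.put("key-1".getBytes(StandardCharsets.UTF_8),
            "value-1".getBytes(StandardCharsets.UTF_8));
        batch.delete("stale-key".getBytes(StandardCharsets.UTF_8));

        // Each queued SingleOperation holds a defensive copy of its key and value.
        for (BatchOperation.SingleOperation op : batch.getOperations()) {
          System.out.println(op.getOpt() + " "
              + new String(op.getKey(), StandardCharsets.UTF_8));
        }
      }
    }

The LevelDB-backed store removed below translates such a batch into a single leveldb WriteBatch.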
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public final class HddsVersionInfo { - - private static final Logger LOG = LoggerFactory.getLogger( - HddsVersionInfo.class); - - public static final VersionInfo HDDS_VERSION_INFO = - new VersionInfo("hdds"); - - private HddsVersionInfo() {} - - public static void main(String[] args) { - System.out.println("Using HDDS " + HDDS_VERSION_INFO.getVersion()); - System.out.println( - "Source code repository " + HDDS_VERSION_INFO.getUrl() + " -r " + - HDDS_VERSION_INFO.getRevision()); - System.out.println("Compiled by " + HDDS_VERSION_INFO.getUser() + " on " - + HDDS_VERSION_INFO.getDate()); - System.out.println( - "Compiled with protoc " + HDDS_VERSION_INFO.getProtocVersion()); - System.out.println( - "From source with checksum " + HDDS_VERSION_INFO.getSrcChecksum()); - if (LOG.isDebugEnabled()) { - LOG.debug("This command was run using " + - ClassUtil.findContainingJar(HddsVersionInfo.class)); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java deleted file mode 100644 index 0598987f9b50b..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter; -import org.fusesource.leveldbjni.JniDBFactory; -import org.iq80.leveldb.DB; -import org.iq80.leveldb.DBIterator; -import org.iq80.leveldb.Options; -import org.iq80.leveldb.ReadOptions; -import org.iq80.leveldb.Snapshot; -import org.iq80.leveldb.WriteBatch; -import org.iq80.leveldb.WriteOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -/** - * LevelDB interface. - */ -public class LevelDBStore implements MetadataStore { - - private static final Logger LOG = - LoggerFactory.getLogger(LevelDBStore.class); - - private DB db; - private final File dbFile; - private final Options dbOptions; - private final WriteOptions writeOptions; - - public LevelDBStore(File dbPath, boolean createIfMissing) - throws IOException { - dbOptions = new Options(); - dbOptions.createIfMissing(createIfMissing); - this.dbFile = dbPath; - this.writeOptions = new WriteOptions().sync(true); - openDB(dbPath, dbOptions); - } - - /** - * Opens a DB file. 
- * - * @param dbPath - DB File path - * @throws IOException - */ - public LevelDBStore(File dbPath, Options options) - throws IOException { - dbOptions = options; - this.dbFile = dbPath; - this.writeOptions = new WriteOptions().sync(true); - openDB(dbPath, dbOptions); - } - - private void openDB(File dbPath, Options options) throws IOException { - if (dbPath.getParentFile().mkdirs()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Db path {} created.", dbPath.getParentFile()); - } - } - db = JniDBFactory.factory.open(dbPath, options); - if (LOG.isDebugEnabled()) { - LOG.debug("LevelDB successfully opened"); - LOG.debug("[Option] cacheSize = " + options.cacheSize()); - LOG.debug("[Option] createIfMissing = " + options.createIfMissing()); - LOG.debug("[Option] blockSize = " + options.blockSize()); - LOG.debug("[Option] compressionType= " + options.compressionType()); - LOG.debug("[Option] maxOpenFiles= " + options.maxOpenFiles()); - LOG.debug("[Option] writeBufferSize= "+ options.writeBufferSize()); - } - } - - /** - * Puts a Key into file. - * - * @param key - key - * @param value - value - */ - @Override - public void put(byte[] key, byte[] value) { - db.put(key, value, writeOptions); - } - - /** - * Get Key. - * - * @param key key - * @return value - */ - @Override - public byte[] get(byte[] key) { - return db.get(key); - } - - /** - * Delete Key. - * - * @param key - Key - */ - @Override - public void delete(byte[] key) { - db.delete(key); - } - - /** - * Closes the DB. - * - * @throws IOException - */ - @Override - public void close() throws IOException { - if (db != null){ - db.close(); - } - } - - /** - * Returns true if the DB is empty. - * - * @return boolean - * @throws IOException - */ - @Override - public boolean isEmpty() throws IOException { - try (DBIterator iter = db.iterator()) { - iter.seekToFirst(); - boolean hasNext = !iter.hasNext(); - return hasNext; - } - } - - /** - * Returns the actual levelDB object. - * @return DB handle. - */ - public DB getDB() { - return db; - } - - /** - * Returns an iterator on all the key-value pairs in the DB. - * @return an iterator on DB entries. 
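The constructors and basic accessors above make LevelDBStore a thin byte[]-keyed wrapper over leveldbjni with synchronous writes. A short usage sketch, assuming the leveldbjni native library is on the classpath and using a hypothetical /tmp path:

    import java.io.File;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hdds.utils.LevelDBStore;

    public final class LevelDBStoreSketch {
      public static void main(String[] args) throws Exception {
        // createIfMissing = true, mirroring the first constructor above.
        LevelDBStore store = new LevelDBStore(new File("/tmp/sketch-db"), true);
        try {
          byte[] key = "container-1".getBytes(StandardCharsets.UTF_8);
          store.put(key, "metadata".getBytes(StandardCharsets.UTF_8));
          System.out.println("empty: " + store.isEmpty());
          System.out.println("value: "
              + new String(store.get(key), StandardCharsets.UTF_8));
          store.delete(key);
        } finally {
          store.close();
        }
      }
    }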
- */ - public DBIterator getIterator() { - return db.iterator(); - } - - - @Override - public void destroy() throws IOException { - close(); - JniDBFactory.factory.destroy(dbFile, dbOptions); - } - - @Override - public ImmutablePair peekAround(int offset, - byte[] from) throws IOException, IllegalArgumentException { - try (DBIterator it = db.iterator()) { - if (from == null) { - it.seekToFirst(); - } else { - it.seek(from); - } - if (!it.hasNext()) { - return null; - } - switch (offset) { - case 0: - Entry current = it.next(); - return new ImmutablePair<>(current.getKey(), current.getValue()); - case 1: - if (it.next() != null && it.hasNext()) { - Entry next = it.peekNext(); - return new ImmutablePair<>(next.getKey(), next.getValue()); - } - break; - case -1: - if (it.hasPrev()) { - Entry prev = it.peekPrev(); - return new ImmutablePair<>(prev.getKey(), prev.getValue()); - } - break; - default: - throw new IllegalArgumentException( - "Position can only be -1, 0 " + "or 1, but found " + offset); - } - } - return null; - } - - @Override - public void iterate(byte[] from, EntryConsumer consumer) - throws IOException { - try (DBIterator iter = db.iterator()) { - if (from != null) { - iter.seek(from); - } else { - iter.seekToFirst(); - } - while (iter.hasNext()) { - Entry current = iter.next(); - if (!consumer.consume(current.getKey(), - current.getValue())) { - break; - } - } - } - } - - /** - * Compacts the DB by removing deleted keys etc. - * @throws IOException if there is an error. - */ - @Override - public void compactDB() throws IOException { - if(db != null) { - // From LevelDB docs : begin == null and end == null means the whole DB. - db.compactRange(null, null); - } - } - - @Override - public void flushDB(boolean sync) { - // TODO: Implement flush for level db - // do nothing - } - - @Override - public void writeBatch(BatchOperation operation) throws IOException { - List operations = - operation.getOperations(); - if (!operations.isEmpty()) { - try (WriteBatch writeBatch = db.createWriteBatch()) { - for (BatchOperation.SingleOperation opt : operations) { - switch (opt.getOpt()) { - case DELETE: - writeBatch.delete(opt.getKey()); - break; - case PUT: - writeBatch.put(opt.getKey(), opt.getValue()); - break; - default: - throw new IllegalArgumentException("Invalid operation " - + opt.getOpt()); - } - } - db.write(writeBatch); - } - } - } - - @Override - public List> getRangeKVs(byte[] startKey, - int count, MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - return getRangeKVs(startKey, count, false, filters); - } - - @Override - public List> getSequentialRangeKVs(byte[] startKey, - int count, MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - return getRangeKVs(startKey, count, true, filters); - } - - /** - * Returns a certain range of key value pairs as a list based on a - * startKey or count. Further a {@link MetadataKeyFilter} can be added to - * filter keys if necessary. To prevent race conditions while listing - * entries, this implementation takes a snapshot and lists the entries from - * the snapshot. This may, on the other hand, cause the range result slight - * different with actual data if data is updating concurrently. - *

- * If the startKey is specified and found in levelDB, this key and the keys - * after this key will be included in the result. If the startKey is null - * all entries will be included as long as other conditions are satisfied. - * If the given startKey doesn't exist, an empty list will be returned. - *

- * The count argument is to limit number of total entries to return, - * the value for count must be an integer greater than 0. - *

- * This method allows to specify one or more {@link MetadataKeyFilter} - * to filter keys by certain condition. Once given, only the entries - * whose key passes all the filters will be included in the result. - * - * @param startKey a start key. - * @param count max number of entries to return. - * @param filters customized one or more {@link MetadataKeyFilter}. - * @return a list of entries found in the database or an empty list if the - * startKey is invalid. - * @throws IOException if there are I/O errors. - * @throws IllegalArgumentException if count is less than 0. - */ - private List> getRangeKVs(byte[] startKey, - int count, boolean sequential, MetadataKeyFilter... filters) - throws IOException { - List> result = new ArrayList<>(); - long start = System.currentTimeMillis(); - if (count < 0) { - throw new IllegalArgumentException( - "Invalid count given " + count + ", count must be greater than 0"); - } - Snapshot snapShot = null; - DBIterator dbIter = null; - try { - snapShot = db.getSnapshot(); - ReadOptions readOptions = new ReadOptions().snapshot(snapShot); - dbIter = db.iterator(readOptions); - if (startKey == null) { - dbIter.seekToFirst(); - } else { - if (db.get(startKey) == null) { - // Key not found, return empty list - return result; - } - dbIter.seek(startKey); - } - while (dbIter.hasNext() && result.size() < count) { - byte[] preKey = dbIter.hasPrev() ? dbIter.peekPrev().getKey() : null; - byte[] nextKey = dbIter.hasNext() ? dbIter.peekNext().getKey() : null; - Entry current = dbIter.next(); - - if (filters == null) { - result.add(current); - } else { - if (Arrays.asList(filters).stream().allMatch( - entry -> entry.filterKey(preKey, current.getKey(), nextKey))) { - result.add(current); - } else { - if (result.size() > 0 && sequential) { - // if the caller asks for a sequential range of results, - // and we met a dis-match, abort iteration from here. - // if result is empty, we continue to look for the first match. - break; - } - } - } - } - } finally { - if (snapShot != null) { - snapShot.close(); - } - if (dbIter != null) { - dbIter.close(); - } - if (LOG.isDebugEnabled()) { - if (filters != null) { - for (MetadataKeyFilters.MetadataKeyFilter filter : filters) { - int scanned = filter.getKeysScannedNum(); - int hinted = filter.getKeysHintedNum(); - if (scanned > 0 || hinted > 0) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}", - filter.getClass().getSimpleName(), - filter.getKeysScannedNum(), filter.getKeysHintedNum()); - } - } - } - } - long end = System.currentTimeMillis(); - long timeConsumed = end - start; - if (LOG.isDebugEnabled()) { - LOG.debug("Time consumed for getRangeKVs() is {}ms," - + " result length is {}.", timeConsumed, result.size()); - } - } - } - return result; - } - - @Override - public MetaStoreIterator iterator() { - return new LevelDBStoreIterator(db.iterator()); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java deleted file mode 100644 index f5b6769b70de3..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import org.iq80.leveldb.DBIterator; -import java.util.Map; -import java.util.NoSuchElementException; - -/** - * LevelDB store iterator. - */ -public class LevelDBStoreIterator - implements MetaStoreIterator { - - - private DBIterator levelDBIterator; - - public LevelDBStoreIterator(DBIterator iterator) { - this.levelDBIterator = iterator; - levelDBIterator.seekToFirst(); - } - - @Override - public boolean hasNext() { - return levelDBIterator.hasNext(); - } - - @Override - public MetadataStore.KeyValue next() { - if(levelDBIterator.hasNext()) { - Map.Entry entry = levelDBIterator.next(); - return MetadataStore.KeyValue.create(entry.getKey(), entry.getValue()); - } - throw new NoSuchElementException("LevelDB Store has no more elements"); - } - - @Override - public void seekToFirst() { - levelDBIterator.seekToFirst(); - } - - @Override - public void seekToLast() { - levelDBIterator.seekToLast(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetaStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetaStoreIterator.java deleted file mode 100644 index 2a33de712ea09..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetaStoreIterator.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import java.util.Iterator; - -/** - * Iterator for MetaDataStore DB. - * @param - */ -public interface MetaStoreIterator extends Iterator { - - /** - * seek to first entry. - */ - void seekToFirst(); - - /** - * seek to last entry. - */ - void seekToLast(); - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java deleted file mode 100644 index a88ce475bab59..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.OzoneConsts; - -import java.util.ArrayList; -import java.util.List; - -/** - * An utility class to filter levelDB keys. - */ -public final class MetadataKeyFilters { - - private static KeyPrefixFilter deletingKeyFilter = - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETING_KEY_PREFIX); - - private static KeyPrefixFilter deletedKeyFilter = - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETED_KEY_PREFIX); - - private static KeyPrefixFilter normalKeyFilter = - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true) - .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true) - .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true) - .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true); - - private MetadataKeyFilters() { - } - - public static KeyPrefixFilter getDeletingKeyFilter() { - return deletingKeyFilter; - } - - public static KeyPrefixFilter getDeletedKeyFilter() { - return deletedKeyFilter; - } - - public static KeyPrefixFilter getNormalKeyFilter() { - return normalKeyFilter; - } - /** - * Interface for levelDB key filters. - */ - public interface MetadataKeyFilter { - /** - * Filter levelDB key with a certain condition. - * - * @param preKey previous key. - * @param currentKey current key. - * @param nextKey next key. - * @return true if a certain condition satisfied, return false otherwise. - */ - boolean filterKey(byte[] preKey, byte[] currentKey, byte[] nextKey); - - default int getKeysScannedNum() { - return 0; - } - - default int getKeysHintedNum() { - return 0; - } - } - - /** - * Utility class to filter key by a string prefix. This filter - * assumes keys can be parsed to a string. - */ - public static class KeyPrefixFilter implements MetadataKeyFilter { - - private List positivePrefixList = new ArrayList<>(); - private List negativePrefixList = new ArrayList<>(); - private boolean atleastOnePositiveMatch; - private int keysScanned = 0; - private int keysHinted = 0; - - public KeyPrefixFilter() {} - - /** - * KeyPrefixFilter constructor. It is made of positive and negative prefix - * list. PositivePrefixList is the list of prefixes which are accepted - * whereas negativePrefixList contains the list of prefixes which are - * rejected. - * - * @param atleastOnePositiveMatch if positive it requires key to be accepted - * by atleast one positive filter. 
- */ - public KeyPrefixFilter(boolean atleastOnePositiveMatch) { - this.atleastOnePositiveMatch = atleastOnePositiveMatch; - } - - public KeyPrefixFilter addFilter(String keyPrefix) { - addFilter(keyPrefix, false); - return this; - } - - public KeyPrefixFilter addFilter(String keyPrefix, boolean negative) { - Preconditions.checkArgument(!Strings.isNullOrEmpty(keyPrefix), - "KeyPrefix is null or empty: " + keyPrefix); - // keyPrefix which needs to be added should not be prefix of any opposing - // filter already present. If keyPrefix is a negative filter it should not - // be a prefix of any positive filter. Nor should any opposing filter be - // a prefix of keyPrefix. - // For example if b0 is accepted b can not be rejected and - // if b is accepted b0 can not be rejected. If these scenarios need to be - // handled we need to add priorities. - if (negative) { - Preconditions.checkArgument(positivePrefixList.stream().noneMatch( - prefix -> prefix.startsWith(keyPrefix) || keyPrefix - .startsWith(prefix)), - "KeyPrefix: " + keyPrefix + " already accepted."); - this.negativePrefixList.add(keyPrefix); - } else { - Preconditions.checkArgument(negativePrefixList.stream().noneMatch( - prefix -> prefix.startsWith(keyPrefix) || keyPrefix - .startsWith(prefix)), - "KeyPrefix: " + keyPrefix + " already rejected."); - this.positivePrefixList.add(keyPrefix); - } - return this; - } - - @Override - public boolean filterKey(byte[] preKey, byte[] currentKey, - byte[] nextKey) { - keysScanned++; - if (currentKey == null) { - return false; - } - boolean accept; - - // There are no filters present - if (positivePrefixList.isEmpty() && negativePrefixList.isEmpty()) { - return true; - } - - accept = !positivePrefixList.isEmpty() && positivePrefixList.stream() - .anyMatch(prefix -> { - byte[] prefixBytes = DFSUtil.string2Bytes(prefix); - return prefixMatch(prefixBytes, currentKey); - }); - if (accept) { - keysHinted++; - return true; - } else if (atleastOnePositiveMatch) { - return false; - } - - accept = !negativePrefixList.isEmpty() && negativePrefixList.stream() - .allMatch(prefix -> { - byte[] prefixBytes = DFSUtil.string2Bytes(prefix); - return !prefixMatch(prefixBytes, currentKey); - }); - if (accept) { - keysHinted++; - return true; - } - - return false; - } - - @Override - public int getKeysScannedNum() { - return keysScanned; - } - - @Override - public int getKeysHintedNum() { - return keysHinted; - } - - private static boolean prefixMatch(byte[] prefix, byte[] key) { - Preconditions.checkNotNull(prefix); - Preconditions.checkNotNull(key); - if (key.length < prefix.length) { - return false; - } - for (int i = 0; i < prefix.length; i++) { - if (key[i] != prefix[i]) { - return false; - } - } - return true; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStore.java deleted file mode 100644 index f05e6d2d275aa..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStore.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
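
A minimal sketch, not part of the deleted sources, of how the KeyPrefixFilter above is assembled and applied; the prefix strings stand in for the OzoneConsts constants and are illustrative only:

  MetadataKeyFilters.KeyPrefixFilter filter =
      new MetadataKeyFilters.KeyPrefixFilter()
          .addFilter("#deleting#")           // positive prefix: keys carrying it are accepted
          .addFilter("#deleted#", true);     // negative prefix: keys carrying it are rejected

  byte[] key = DFSUtil.string2Bytes("#deleting#block-1");
  boolean accepted = filter.filterKey(null, key, null);   // true for this key
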
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.Map; - -/** - * Interface for key-value store that stores ozone metadata. - * Ozone metadata is stored as key value pairs, both key and value - * are arbitrary byte arrays. - */ -@InterfaceStability.Evolving -public interface MetadataStore extends Closeable{ - - /** - * Puts a key-value pair into the store. - * - * @param key metadata key - * @param value metadata value - */ - void put(byte[] key, byte[] value) throws IOException; - - /** - * @return true if the metadata store is empty. - * - * @throws IOException - */ - boolean isEmpty() throws IOException; - - /** - * Returns the value mapped to the given key in byte array. - * - * @param key metadata key - * @return value in byte array - * @throws IOException - */ - byte[] get(byte[] key) throws IOException; - - /** - * Deletes a key from the metadata store. - * - * @param key metadata key - * @throws IOException - */ - void delete(byte[] key) throws IOException; - - /** - * Returns a certain range of key value pairs as a list based on a - * startKey or count. Further a {@link MetadataKeyFilter} can be added to - * filter keys if necessary. To prevent race conditions while listing - * entries, this implementation takes a snapshot and lists the entries from - * the snapshot. This may, on the other hand, cause the range result slight - * different with actual data if data is updating concurrently. - *

- * If the startKey is specified and found in levelDB, this key and the keys
- * after this key will be included in the result. If the startKey is null,
- * all entries will be included as long as other conditions are satisfied.
- * If the given startKey doesn't exist, an empty list will be returned.
- *
- * The count argument limits the total number of entries to return;
- * the value for count must be an integer greater than 0.
- *
- * This method allows to specify one or more {@link MetadataKeyFilter} - * to filter keys by certain condition. Once given, only the entries - * whose key passes all the filters will be included in the result. - * - * @param startKey a start key. - * @param count max number of entries to return. - * @param filters customized one or more {@link MetadataKeyFilter}. - * @return a list of entries found in the database or an empty list if the - * startKey is invalid. - * @throws IOException if there are I/O errors. - * @throws IllegalArgumentException if count is less than 0. - */ - List> getRangeKVs(byte[] startKey, - int count, MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException; - - /** - * This method is very similar to {@link #getRangeKVs}, the only - * different is this method is supposed to return a sequential range - * of elements based on the filters. While iterating the elements, - * if it met any entry that cannot pass the filter, the iterator will stop - * from this point without looking for next match. If no filter is given, - * this method behaves just like {@link #getRangeKVs}. - * - * @param startKey a start key. - * @param count max number of entries to return. - * @param filters customized one or more {@link MetadataKeyFilter}. - * @return a list of entries found in the database. - * @throws IOException - * @throws IllegalArgumentException - */ - List> getSequentialRangeKVs(byte[] startKey, - int count, MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException; - - /** - * A batch of PUT, DELETE operations handled as a single atomic write. - * - * @throws IOException write fails - */ - void writeBatch(BatchOperation operation) throws IOException; - - /** - * Compact the entire database. - * @throws IOException - */ - void compactDB() throws IOException; - - /** - * Flush the outstanding I/O operations of the DB. - * @param sync if true will sync the outstanding I/Os to the disk. - */ - void flushDB(boolean sync) throws IOException; - - /** - * Destroy the content of the specified database, - * a destroyed database will not be able to load again. - * Be very careful with this method. - * - * @throws IOException if I/O error happens - */ - void destroy() throws IOException; - - /** - * Seek the database to a certain key, returns the key-value - * pairs around this key based on the given offset. Note, this method - * can only support offset -1 (left), 0 (current) and 1 (right), - * any other offset given will cause a {@link IllegalArgumentException}. - * - * @param offset offset to the key - * @param from from which key - * @return a key-value pair - * @throws IOException - */ - ImmutablePair peekAround(int offset, byte[] from) - throws IOException, IllegalArgumentException; - - /** - * Iterates entries in the database from a certain key. - * Applies the given {@link EntryConsumer} to the key and value of - * each entry, the function produces a boolean result which is used - * as the criteria to exit from iteration. - * - * @param from the start key - * @param consumer - * a {@link EntryConsumer} applied to each key and value. If the consumer - * returns true, continues the iteration to next entry; otherwise exits - * the iteration. - * @throws IOException - */ - void iterate(byte[] from, EntryConsumer consumer) - throws IOException; - - /** - * Returns the iterator for this metadata store. - * @return MetaStoreIterator - */ - MetaStoreIterator iterator(); - - /** - * Class used to represent the key and value pair of a db entry. 
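
A usage sketch for the range-query contract described above, assuming an existing MetadataStore instance named store; the element type (Map.Entry of byte arrays) follows the LevelDB/RocksDB implementations shown in this patch:

  List<Map.Entry<byte[], byte[]>> deleting =
      store.getRangeKVs(null, 100, MetadataKeyFilters.getDeletingKeyFilter());
  for (Map.Entry<byte[], byte[]> kv : deleting) {
    // kv.getKey() / kv.getValue() hold the raw key and value bytes
  }
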
- */ - class KeyValue { - - private final byte[] key; - private final byte[] value; - - /** - * KeyValue Constructor, used to represent a key and value of a db entry. - * @param key - * @param value - */ - private KeyValue(byte[] key, byte[] value) { - this.key = key; - this.value = value; - } - - /** - * Return key. - * @return byte[] - */ - public byte[] getKey() { - byte[] result = new byte[key.length]; - System.arraycopy(key, 0, result, 0, key.length); - return result; - } - - /** - * Return value. - * @return byte[] - */ - public byte[] getValue() { - byte[] result = new byte[value.length]; - System.arraycopy(value, 0, result, 0, value.length); - return result; - } - - /** - * Create a KeyValue pair. - * @param key - * @param value - * @return KeyValue object. - */ - public static KeyValue create(byte[] key, byte[] value) { - return new KeyValue(key, value); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java deleted file mode 100644 index 85bb6aa4bffc5..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import java.io.File; -import java.io.IOException; -import java.util.Optional; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConfigKeys; - -import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; -import org.iq80.leveldb.Options; -import org.rocksdb.BlockBasedTableConfig; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Builder for metadata store. 
- */ -public class MetadataStoreBuilder { - - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(MetadataStoreBuilder.class); - private File dbFile; - private long cacheSize; - private boolean createIfMissing = true; - private Optional optionalConf = Optional.empty(); - private String dbType; - - public static MetadataStoreBuilder newBuilder() { - return new MetadataStoreBuilder(); - } - - public MetadataStoreBuilder setDbFile(File dbPath) { - this.dbFile = dbPath; - return this; - } - - public MetadataStoreBuilder setCacheSize(long cache) { - this.cacheSize = cache; - return this; - } - - public MetadataStoreBuilder setCreateIfMissing(boolean doCreate) { - this.createIfMissing = doCreate; - return this; - } - - public MetadataStoreBuilder setConf(Configuration configuration) { - this.optionalConf = Optional.of(configuration); - return this; - } - - /** - * Set the container DB Type. - * @param type - * @return MetadataStoreBuilder - */ - public MetadataStoreBuilder setDBType(String type) { - this.dbType = type; - return this; - } - - - public MetadataStore build() throws IOException { - if (dbFile == null) { - throw new IllegalArgumentException("Failed to build metadata store, " - + "dbFile is required but not found"); - } - - // Build db store based on configuration - final Configuration conf = optionalConf.orElseGet( - () -> new OzoneConfiguration()); - - if(dbType == null) { - LOG.debug("dbType is null, using "); - dbType = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT); - LOG.debug("dbType is null, using dbType {} from ozone configuration", - dbType); - } else { - LOG.debug("Using dbType {} for metastore", dbType); - } - if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(dbType)) { - Options options = new Options(); - options.createIfMissing(createIfMissing); - if (cacheSize > 0) { - options.cacheSize(cacheSize); - } - return new LevelDBStore(dbFile, options); - } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) { - org.rocksdb.Options opts = new org.rocksdb.Options(); - opts.setCreateIfMissing(createIfMissing); - - if (cacheSize > 0) { - BlockBasedTableConfig tableConfig = new BlockBasedTableConfig(); - tableConfig.setBlockCacheSize(cacheSize); - opts.setTableFormatConfig(tableConfig); - } - - String rocksDbStat = conf.getTrimmed( - OZONE_METADATA_STORE_ROCKSDB_STATISTICS, - OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT); - - if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) { - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat)); - opts = opts.setStatistics(statistics); - - } - return new RocksDBStore(dbFile, opts); - } - - throw new IllegalArgumentException("Invalid argument for " - + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL - + ". Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB - + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB - + ", but met " + dbType); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RetriableTask.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RetriableTask.java deleted file mode 100644 index a3ee1fd51bd85..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RetriableTask.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
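
The builder above is typically driven fluently; a sketch in which the path and cache size are illustrative values, and build() may throw IOException:

  MetadataStore store = MetadataStoreBuilder.newBuilder()
      .setConf(new OzoneConfiguration())
      .setDbFile(new File("/var/lib/hadoop/container.db"))   // illustrative location
      .setCacheSize(64L * 1024 * 1024)                        // illustrative 64 MB cache
      .setCreateIfMissing(true)
      .build();
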
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.util.ThreadUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.concurrent.Callable; - -/** - * {@code Callable} implementation that retries a delegate task according to - * the specified {@code RetryPolicy}. Sleeps between retries in the caller - * thread. - * - * @param the result type of method {@code call} - */ -public class RetriableTask implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(RetriableTask.class); - - private final String name; - private final Callable task; - private final RetryPolicy retryPolicy; - - public RetriableTask(RetryPolicy retryPolicy, String name, Callable task) { - this.retryPolicy = retryPolicy; - this.name = name; - this.task = task; - } - - @Override - public V call() throws Exception { - int attempts = 0; - Exception cause; - while (true) { - try { - return task.call(); - } catch (Exception e) { - cause = e; - RetryPolicy.RetryAction action = retryPolicy.shouldRetry(e, ++attempts, - 0, true); - if (action.action == RetryPolicy.RetryAction.RetryDecision.RETRY) { - LOG.info("Execution of task {} failed, will be retried in {} ms", - name, action.delayMillis); - ThreadUtil.sleepAtLeastIgnoreInterrupts(action.delayMillis); - } else { - break; - } - } - } - - String msg = String.format( - "Execution of task %s failed permanently after %d attempts", - name, attempts); - LOG.warn(msg, cause); - throw new IOException(msg, cause); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java deleted file mode 100644 index 7dd1bde1b7795..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
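
A sketch of wrapping a transient operation in the RetriableTask above; the retry policy (5 attempts, 1 second apart) is an illustrative choice built from Hadoop's standard RetryPolicies, and RetriableTask is assumed generic in its result type, per its class javadoc:

  RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
      5, 1, TimeUnit.SECONDS);
  RetriableTask<Void> openTask = new RetriableTask<>(policy, "openDB", () -> {
    // transiently failing work goes here
    return null;
  });
  openTask.call();   // wraps the last failure in an IOException once retries are exhausted
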
- */ - -package org.apache.hadoop.hdds.utils; - -import com.google.common.base.Preconditions; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.ratis.thirdparty.com.google.common.annotations. - VisibleForTesting; -import org.rocksdb.DbPath; -import org.rocksdb.Options; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.RocksIterator; -import org.rocksdb.WriteBatch; -import org.rocksdb.WriteOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.File; -import java.io.IOException; -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * RocksDB implementation of ozone metadata store. - */ -public class RocksDBStore implements MetadataStore { - - private static final Logger LOG = - LoggerFactory.getLogger(RocksDBStore.class); - - private RocksDB db = null; - private File dbLocation; - private WriteOptions writeOptions; - private Options dbOptions; - private ObjectName statMBeanName; - - public RocksDBStore(File dbFile, Options options) - throws IOException { - Preconditions.checkNotNull(dbFile, "DB file location cannot be null"); - RocksDB.loadLibrary(); - dbOptions = options; - dbLocation = dbFile; - writeOptions = new WriteOptions(); - try { - - db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath()); - if (dbOptions.statistics() != null) { - - Map jmxProperties = new HashMap(); - jmxProperties.put("dbName", dbFile.getName()); - statMBeanName = HddsUtils.registerWithJmxProperties( - "Ozone", "RocksDbStore", jmxProperties, - RocksDBStoreMBean.create(dbOptions.statistics(), - dbFile.getName())); - if (statMBeanName == null) { - LOG.warn("jmx registration failed during RocksDB init, db path :{}", - dbFile.getAbsolutePath()); - } - } - } catch (RocksDBException e) { - throw new IOException( - "Failed init RocksDB, db path : " + dbFile.getAbsolutePath(), e); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("RocksDB successfully opened."); - LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath()); - LOG.debug("[Option] createIfMissing = {}", options.createIfMissing()); - LOG.debug("[Option] compactionPriority= {}", options.compactionStyle()); - LOG.debug("[Option] compressionType= {}", options.compressionType()); - LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles()); - LOG.debug("[Option] writeBufferSize= {}", options.writeBufferSize()); - } - } - - public static IOException toIOException(String msg, RocksDBException e) { - String statusCode = e.getStatus() == null ? "N/A" : - e.getStatus().getCodeString(); - String errMessage = e.getMessage() == null ? 
"Unknown error" : - e.getMessage(); - String output = msg + "; status : " + statusCode - + "; message : " + errMessage; - return new IOException(output, e); - } - - @Override - public void put(byte[] key, byte[] value) throws IOException { - try { - db.put(writeOptions, key, value); - } catch (RocksDBException e) { - throw toIOException("Failed to put key-value to metadata store", e); - } - } - - @Override - public boolean isEmpty() throws IOException { - RocksIterator it = null; - try { - it = db.newIterator(); - it.seekToFirst(); - return !it.isValid(); - } finally { - if (it != null) { - it.close(); - } - } - } - - @Override - public byte[] get(byte[] key) throws IOException { - try { - return db.get(key); - } catch (RocksDBException e) { - throw toIOException("Failed to get the value for the given key", e); - } - } - - @Override - public void delete(byte[] key) throws IOException { - try { - db.delete(key); - } catch (RocksDBException e) { - throw toIOException("Failed to delete the given key", e); - } - } - - @Override - public List> getRangeKVs(byte[] startKey, - int count, MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - return getRangeKVs(startKey, count, false, filters); - } - - @Override - public List> getSequentialRangeKVs(byte[] startKey, - int count, MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - return getRangeKVs(startKey, count, true, filters); - } - - private List> getRangeKVs(byte[] startKey, - int count, boolean sequential, - MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - List> result = new ArrayList<>(); - long start = System.currentTimeMillis(); - if (count < 0) { - throw new IllegalArgumentException( - "Invalid count given " + count + ", count must be greater than 0"); - } - RocksIterator it = null; - try { - it = db.newIterator(); - if (startKey == null) { - it.seekToFirst(); - } else { - if(get(startKey) == null) { - // Key not found, return empty list - return result; - } - it.seek(startKey); - } - while(it.isValid() && result.size() < count) { - byte[] currentKey = it.key(); - byte[] currentValue = it.value(); - - it.prev(); - final byte[] prevKey = it.isValid() ? it.key() : null; - - it.seek(currentKey); - it.next(); - final byte[] nextKey = it.isValid() ? it.key() : null; - - if (filters == null) { - result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey, - currentValue)); - } else { - if (Arrays.asList(filters).stream() - .allMatch(entry -> entry.filterKey(prevKey, - currentKey, nextKey))) { - result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey, - currentValue)); - } else { - if (result.size() > 0 && sequential) { - // if the caller asks for a sequential range of results, - // and we met a dis-match, abort iteration from here. - // if result is empty, we continue to look for the first match. 
- break; - } - } - } - } - } finally { - if (it != null) { - it.close(); - } - long end = System.currentTimeMillis(); - long timeConsumed = end - start; - if (LOG.isDebugEnabled()) { - if (filters != null) { - for (MetadataKeyFilters.MetadataKeyFilter filter : filters) { - int scanned = filter.getKeysScannedNum(); - int hinted = filter.getKeysHintedNum(); - if (scanned > 0 || hinted > 0) { - LOG.debug( - "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}", - filter.getClass().getSimpleName(), filter.getKeysScannedNum(), - filter.getKeysHintedNum()); - } - } - } - LOG.debug("Time consumed for getRangeKVs() is {}ms," - + " result length is {}.", timeConsumed, result.size()); - } - } - return result; - } - - @Override - public void writeBatch(BatchOperation operation) - throws IOException { - List operations = - operation.getOperations(); - if (!operations.isEmpty()) { - try (WriteBatch writeBatch = new WriteBatch()) { - for (BatchOperation.SingleOperation opt : operations) { - switch (opt.getOpt()) { - case DELETE: - writeBatch.delete(opt.getKey()); - break; - case PUT: - writeBatch.put(opt.getKey(), opt.getValue()); - break; - default: - throw new IllegalArgumentException("Invalid operation " - + opt.getOpt()); - } - } - db.write(writeOptions, writeBatch); - } catch (RocksDBException e) { - throw toIOException("Batch write operation failed", e); - } - } - } - - @Override - public void compactDB() throws IOException { - if (db != null) { - try { - db.compactRange(); - } catch (RocksDBException e) { - throw toIOException("Failed to compact db", e); - } - } - } - - @Override - public void flushDB(boolean sync) throws IOException { - if (db != null) { - try { - // for RocksDB it is sufficient to flush the WAL as entire db can - // be reconstructed using it. - db.flushWal(sync); - } catch (RocksDBException e) { - throw toIOException("Failed to flush db", e); - } - } - } - - private void deleteQuietly(File fileOrDir) { - if (fileOrDir != null && fileOrDir.exists()) { - try { - FileUtils.forceDelete(fileOrDir); - } catch (IOException e) { - LOG.warn("Failed to delete dir {}", fileOrDir.getAbsolutePath(), e); - } - } - } - - @Override - public void destroy() throws IOException { - // Make sure db is closed. - close(); - - // There is no destroydb java API available, - // equivalently we can delete all db directories. - deleteQuietly(dbLocation); - deleteQuietly(new File(dbOptions.dbLogDir())); - deleteQuietly(new File(dbOptions.walDir())); - List dbPaths = dbOptions.dbPaths(); - if (dbPaths != null) { - dbPaths.forEach(dbPath -> { - deleteQuietly(new File(dbPath.toString())); - }); - } - } - - @Override - public ImmutablePair peekAround(int offset, - byte[] from) throws IOException, IllegalArgumentException { - RocksIterator it = null; - try { - it = db.newIterator(); - if (from == null) { - it.seekToFirst(); - } else { - it.seek(from); - } - if (!it.isValid()) { - return null; - } - - switch (offset) { - case 0: - break; - case 1: - it.next(); - break; - case -1: - it.prev(); - break; - default: - throw new IllegalArgumentException( - "Position can only be -1, 0 " + "or 1, but found " + offset); - } - return it.isValid() ? 
new ImmutablePair<>(it.key(), it.value()) : null; - } finally { - if (it != null) { - it.close(); - } - } - } - - @Override - public void iterate(byte[] from, EntryConsumer consumer) - throws IOException { - RocksIterator it = null; - try { - it = db.newIterator(); - if (from != null) { - it.seek(from); - } else { - it.seekToFirst(); - } - while (it.isValid()) { - if (!consumer.consume(it.key(), it.value())) { - break; - } - it.next(); - } - } finally { - if (it != null) { - it.close(); - } - } - } - - @Override - public void close() throws IOException { - if (statMBeanName != null) { - MBeans.unregister(statMBeanName); - statMBeanName = null; - } - if (db != null) { - db.close(); - } - - } - - @VisibleForTesting - protected ObjectName getStatMBeanName() { - return statMBeanName; - } - - @Override - public MetaStoreIterator iterator() { - return new RocksDBStoreIterator(db.newIterator()); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java deleted file mode 100644 index e39ec5774580b..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.hadoop.hdds.utils; - -import org.rocksdb.RocksIterator; - -import java.util.NoSuchElementException; - -/** - * RocksDB store iterator. 
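
A sketch of the iterate() contract implemented above, assuming a store instance and that EntryConsumer (not shown in this hunk) is a single-method callback over the key and value bytes, as the MetadataStore javadoc describes:

  store.iterate(null, (key, value) -> {
    // inspect the entry; returning false stops the iteration early
    return true;
  });
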
- */ -public class RocksDBStoreIterator - implements MetaStoreIterator { - - private RocksIterator rocksDBIterator; - - public RocksDBStoreIterator(RocksIterator iterator) { - this.rocksDBIterator = iterator; - rocksDBIterator.seekToFirst(); - } - - @Override - public boolean hasNext() { - return rocksDBIterator.isValid(); - } - - @Override - public MetadataStore.KeyValue next() { - if (rocksDBIterator.isValid()) { - MetadataStore.KeyValue value = - MetadataStore.KeyValue.create(rocksDBIterator.key(), rocksDBIterator - .value()); - rocksDBIterator.next(); - return value; - } - throw new NoSuchElementException("RocksDB Store has no more elements"); - } - - @Override - public void seekToFirst() { - rocksDBIterator.seekToFirst(); - } - - @Override - public void seekToLast() { - rocksDBIterator.seekToLast(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMBean.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMBean.java deleted file mode 100644 index 60d4db880c4ab..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMBean.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.Interns; -import org.rocksdb.HistogramData; -import org.rocksdb.HistogramType; -import org.rocksdb.Statistics; -import org.rocksdb.TickerType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.Attribute; -import javax.management.AttributeList; -import javax.management.AttributeNotFoundException; -import javax.management.DynamicMBean; -import javax.management.InvalidAttributeValueException; -import javax.management.MBeanAttributeInfo; -import javax.management.MBeanException; -import javax.management.MBeanInfo; -import javax.management.ReflectionException; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Adapter JMX bean to publish all the Rocksdb metrics. 
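
A sketch of walking the store through the MetaStoreIterator returned by iterator(); the element type, inferred from the RocksDB and LevelDB iterator implementations in this patch, is MetadataStore.KeyValue:

  MetaStoreIterator<MetadataStore.KeyValue> it = store.iterator();
  while (it.hasNext()) {
    MetadataStore.KeyValue kv = it.next();
    // kv.getKey() / kv.getValue() return defensive copies of the stored bytes
  }
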
- */ -public class RocksDBStoreMBean implements DynamicMBean, MetricsSource { - - private Statistics statistics; - - private Set histogramAttributes = new HashSet<>(); - - private String contextName; - - private static final Logger LOG = - LoggerFactory.getLogger(RocksDBStoreMBean.class); - - public final static String ROCKSDB_CONTEXT_PREFIX = "Rocksdb_"; - - public RocksDBStoreMBean(Statistics statistics, String dbName) { - this.contextName = ROCKSDB_CONTEXT_PREFIX + dbName; - this.statistics = statistics; - histogramAttributes.add("Average"); - histogramAttributes.add("Median"); - histogramAttributes.add("Percentile95"); - histogramAttributes.add("Percentile99"); - histogramAttributes.add("StandardDeviation"); - } - - public static RocksDBStoreMBean create(Statistics statistics, - String contextName) { - - RocksDBStoreMBean rocksDBStoreMBean = new RocksDBStoreMBean( - statistics, contextName); - MetricsSystem ms = DefaultMetricsSystem.instance(); - MetricsSource metricsSource = ms.getSource(rocksDBStoreMBean.contextName); - if (metricsSource != null) { - return (RocksDBStoreMBean)metricsSource; - } else { - return ms.register(rocksDBStoreMBean.contextName, - "RocksDB Metrics", - rocksDBStoreMBean); - } - } - - @Override - public Object getAttribute(String attribute) - throws AttributeNotFoundException, MBeanException, ReflectionException { - for (String histogramAttribute : histogramAttributes) { - if (attribute.endsWith("_" + histogramAttribute.toUpperCase())) { - String keyName = attribute - .substring(0, attribute.length() - histogramAttribute.length() - 1); - try { - HistogramData histogram = - statistics.getHistogramData(HistogramType.valueOf(keyName)); - try { - Method method = - HistogramData.class.getMethod("get" + histogramAttribute); - return method.invoke(histogram); - } catch (Exception e) { - throw new ReflectionException(e, - "Can't read attribute " + attribute); - } - } catch (IllegalArgumentException exception) { - throw new AttributeNotFoundException( - "No such attribute in RocksDB stats: " + attribute); - } - } - } - try { - return statistics.getTickerCount(TickerType.valueOf(attribute)); - } catch (IllegalArgumentException ex) { - throw new AttributeNotFoundException( - "No such attribute in RocksDB stats: " + attribute); - } - } - - @Override - public void setAttribute(Attribute attribute) - throws AttributeNotFoundException, InvalidAttributeValueException, - MBeanException, ReflectionException { - - } - - @Override - public AttributeList getAttributes(String[] attributes) { - AttributeList result = new AttributeList(); - for (String attributeName : attributes) { - try { - Object value = getAttribute(attributeName); - result.add(value); - } catch (Exception e) { - //TODO - } - } - return result; - } - - @Override - public AttributeList setAttributes(AttributeList attributes) { - return null; - } - - @Override - public Object invoke(String actionName, Object[] params, String[] signature) - throws MBeanException, ReflectionException { - return null; - } - - @Override - public MBeanInfo getMBeanInfo() { - - List attributes = new ArrayList<>(); - for (TickerType tickerType : TickerType.values()) { - attributes.add(new MBeanAttributeInfo(tickerType.name(), "long", - "RocksDBStat: " + tickerType.name(), true, false, false)); - } - for (HistogramType histogramType : HistogramType.values()) { - for (String histogramAttribute : histogramAttributes) { - attributes.add(new MBeanAttributeInfo( - histogramType.name() + "_" + histogramAttribute.toUpperCase(), - "long", 
"RocksDBStat: " + histogramType.name(), true, false, - false)); - } - } - - return new MBeanInfo("", "RocksDBStat", - attributes.toArray(new MBeanAttributeInfo[0]), null, null, null); - - } - - @Override - public void getMetrics(MetricsCollector metricsCollector, boolean b) { - MetricsRecordBuilder rb = metricsCollector.addRecord(contextName); - getHistogramData(rb); - getTickerTypeData(rb); - } - - /** - * Collect all histogram metrics from RocksDB statistics. - * @param rb Metrics Record Builder. - */ - private void getHistogramData(MetricsRecordBuilder rb) { - for (HistogramType histogramType : HistogramType.values()) { - HistogramData histogram = - statistics.getHistogramData( - HistogramType.valueOf(histogramType.name())); - for (String histogramAttribute : histogramAttributes) { - try { - Method method = - HistogramData.class.getMethod("get" + histogramAttribute); - double metricValue = (double) method.invoke(histogram); - rb.addGauge(Interns.info(histogramType.name() + "_" + - histogramAttribute.toUpperCase(), "RocksDBStat"), - metricValue); - } catch (Exception e) { - LOG.error("Error reading histogram data {} ", e); - } - } - } - } - - /** - * Collect all Counter metrics from RocksDB statistics. - * @param rb Metrics Record Builder. - */ - private void getTickerTypeData(MetricsRecordBuilder rb) { - for (TickerType tickerType : TickerType.values()) { - rb.addCounter(Interns.info(tickerType.name(), "RocksDBStat"), - statistics.getTickerCount(tickerType)); - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java deleted file mode 100644 index 9edc104481016..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import org.apache.ratis.util.function.CheckedRunnable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -/** - * This class encapsulates ScheduledExecutorService. - */ -public class Scheduler { - - private static final Logger LOG = - LoggerFactory.getLogger(Scheduler.class); - - private ScheduledExecutorService scheduler; - - private volatile boolean isClosed; - - private String threadName; - - /** - * Creates a ScheduledExecutorService based on input arguments. 
- * @param threadName - thread name - * @param isDaemon - if true the threads in the scheduler are started as - * daemon - * @param numCoreThreads - number of core threads to maintain in the scheduler - */ - public Scheduler(String threadName, boolean isDaemon, int numCoreThreads) { - scheduler = Executors.newScheduledThreadPool(numCoreThreads, r -> { - Thread t = new Thread(r); - t.setName(threadName); - t.setDaemon(isDaemon); - return t; - }); - this.threadName = threadName; - isClosed = false; - } - - public void schedule(Runnable runnable, long delay, TimeUnit timeUnit) { - scheduler.schedule(runnable, delay, timeUnit); - } - - public void schedule(CheckedRunnable runnable, long delay, - TimeUnit timeUnit, Logger logger, String errMsg) { - scheduler.schedule(() -> { - try { - runnable.run(); - } catch (Throwable throwable) { - logger.error(errMsg, throwable); - } - }, delay, timeUnit); - } - - public void scheduleWithFixedDelay(Runnable runnable, long initialDelay, - long fixedDelay, TimeUnit timeUnit) { - scheduler - .scheduleWithFixedDelay(runnable, initialDelay, fixedDelay, timeUnit); - } - - public boolean isClosed() { - return isClosed; - } - - /** - * Closes the scheduler for further task submission. Any pending tasks not - * yet executed are also cancelled. For the executing tasks the scheduler - * waits 60 seconds for completion. - */ - public synchronized void close() { - isClosed = true; - if (scheduler != null) { - scheduler.shutdownNow(); - try { - scheduler.awaitTermination(60, TimeUnit.SECONDS); - } catch (InterruptedException e) { - LOG.info( - threadName + " interrupted while waiting for task completion {}", - e); - } - } - scheduler = null; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java deleted file mode 100644 index 091453612577a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.hdds.HddsUtils; - -/** - * This class uses system current time milliseconds to generate unique id. - */ -public final class UniqueId { - /* - * When we represent time in milliseconds using 'long' data type, - * the LSB bits are used. Currently we are only using 44 bits (LSB), - * 20 bits (MSB) are not used. - * We will exhaust this 44 bits only when we are in year 2525, - * until then we can safely use this 20 bits (MSB) for offset to generate - * unique id within millisecond. 
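
A sketch of driving the Scheduler wrapper above; the thread name and delays are illustrative:

  Scheduler scheduler = new Scheduler("ContainerScrubber", true, 1);
  scheduler.scheduleWithFixedDelay(
      () -> System.out.println("periodic housekeeping"), 0, 60, TimeUnit.SECONDS);
  // on shutdown: cancels pending tasks and waits up to 60 seconds for running ones
  scheduler.close();
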
- * - * Year : Mon Dec 31 18:49:04 IST 2525 - * TimeInMillis: 17545641544247 - * Binary Representation: - * MSB (20 bits): 0000 0000 0000 0000 0000 - * LSB (44 bits): 1111 1111 0101 0010 1001 1011 1011 0100 1010 0011 0111 - * - * We have 20 bits to run counter, we should exclude the first bit (MSB) - * as we don't want to deal with negative values. - * To be on safer side we will use 'short' data type which is of length - * 16 bits and will give us 65,536 values for offset. - * - */ - - private static volatile short offset = 0; - - /** - * Private constructor so that no one can instantiate this class. - */ - private UniqueId() {} - - /** - * Calculate and returns next unique id based on System#currentTimeMillis. - * - * @return unique long value - */ - public static synchronized long next() { - long utcTime = HddsUtils.getUtcTime(); - if ((utcTime & 0xFFFF000000000000L) == 0) { - return utcTime << Short.SIZE | (offset++ & 0x0000FFFF); - } - throw new RuntimeException("Got invalid UTC time," + - " cannot generate unique Id. UTC Time: " + utcTime); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java deleted file mode 100644 index ca9f859ccb403..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.util.ThreadUtil; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.InputStream; -import java.util.Properties; - -/** - * This class returns build information about Hadoop components. 
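
The packing performed by UniqueId#next() above can be unpacked as follows; a sketch mirroring utcTime << Short.SIZE | (offset++ & 0x0000FFFF):

  long id      = UniqueId.next();
  long millis  = id >>> Short.SIZE;    // the 44-bit UTC millisecond part
  int  counter = (int) (id & 0xFFFF);  // the 16-bit per-millisecond offset
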
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class VersionInfo { - - private final Properties info = new Properties(); - - public VersionInfo(String component) { - String versionInfoFile = component + "-version-info.properties"; - InputStream is = null; - try { - is = ThreadUtil.getResourceAsStream( - getClass().getClassLoader(), - versionInfoFile); - info.load(is); - } catch (IOException ex) { - LoggerFactory.getLogger(getClass()).warn("Could not read '" + - versionInfoFile + "', " + ex.toString(), ex); - } finally { - IOUtils.closeStream(is); - } - } - - public String getRelease() { - return info.getProperty("release", "Unknown"); - } - - public String getVersion() { - return info.getProperty("version", "Unknown"); - } - - public String getRevision() { - return info.getProperty("revision", "Unknown"); - } - - public String getBranch() { - return info.getProperty("branch", "Unknown"); - } - - public String getDate() { - return info.getProperty("date", "Unknown"); - } - - public String getUser() { - return info.getProperty("user", "Unknown"); - } - - public String getUrl() { - return info.getProperty("url", "Unknown"); - } - - public String getSrcChecksum() { - return info.getProperty("srcChecksum", "Unknown"); - } - - public String getProtocVersion() { - return info.getProperty("protocVersion", "Unknown"); - } - - public String getBuildVersion() { - return getVersion() + - " from " + getRevision() + - " by " + getUser() + - " source checksum " + getSrcChecksum(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperation.java deleted file mode 100644 index 8ca5d188ebcd7..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperation.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -/** - * Class represents a batch operation, collects multiple db operation. - */ -public interface BatchOperation extends AutoCloseable { - - void close(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayKeyValue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayKeyValue.java deleted file mode 100644 index 7c602911fe490..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayKeyValue.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
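
A sketch of the VersionInfo lookup above: the component string names a <component>-version-info.properties resource on the classpath ("hdds" here is illustrative), and every getter falls back to "Unknown" when the property is absent:

  VersionInfo info = new VersionInfo("hdds");   // reads hdds-version-info.properties
  System.out.println(info.getBuildVersion());
  // prints "<version> from <revision> by <user> source checksum <srcChecksum>"
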
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import org.apache.hadoop.hdds.utils.db.Table.KeyValue; - -/** - * Key value for raw Table implementations. - */ -public final class ByteArrayKeyValue implements KeyValue { - private byte[] key; - private byte[] value; - - private ByteArrayKeyValue(byte[] key, byte[] value) { - this.key = key; - this.value = value; - } - - /** - * Create a KeyValue pair. - * - * @param key - Key Bytes - * @param value - Value bytes - * @return KeyValue object. - */ - public static ByteArrayKeyValue create(byte[] key, byte[] value) { - return new ByteArrayKeyValue(key, value); - } - - /** - * Return key. - * - * @return byte[] - */ - public byte[] getKey() { - byte[] result = new byte[key.length]; - System.arraycopy(key, 0, result, 0, key.length); - return result; - } - - /** - * Return value. - * - * @return byte[] - */ - public byte[] getValue() { - byte[] result = new byte[value.length]; - System.arraycopy(value, 0, result, 0, value.length); - return result; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java deleted file mode 100644 index 36ece3ea774cc..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; - -/** - * Codec interface to marshall/unmarshall data to/from a byte[] based - * key/value store. - * - * @param Unserialized type - */ -public interface Codec { - - /** - * Convert object to raw persisted format. - * @param object The original java object. Should not be null. - */ - byte[] toPersistedFormat(T object) throws IOException; - - /** - * Convert object from raw persisted format. - * - * @param rawData Byte array from the key/value store. Should not be null. 
- */ - T fromPersistedFormat(byte[] rawData) throws IOException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java deleted file mode 100644 index f92189aef5bac..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import com.google.common.base.Preconditions; - -/** - * Collection of available codecs. - */ -public class CodecRegistry { - - private Map> valueCodecs; - - public CodecRegistry() { - valueCodecs = new HashMap<>(); - valueCodecs.put(String.class, new StringCodec()); - valueCodecs.put(Long.class, new LongCodec()); - } - - /** - * Convert raw value to strongly typed value/key with the help of a codec. - * - * @param rawData original byte array from the db. - * @param format Class of the return value - * @param Type of the return value. - * @return the object with the parsed field data - */ - public T asObject(byte[] rawData, Class format) - throws IOException { - if (rawData == null) { - return null; - } - Codec codec = getCodec(format); - return (T) codec.fromPersistedFormat(rawData); - } - - /** - * Convert strongly typed object to raw data to store it in the kv store. - * - * @param object typed object. - * @param Type of the typed object. - * @return byte array to store it ini the kv store. - */ - public byte[] asRawData(T object) throws IOException { - Preconditions.checkNotNull(object, - "Null value shouldn't be persisted in the database"); - Codec codec = getCodec(object); - return codec.toPersistedFormat(object); - } - - /** - * Get codec for the typed object including class and subclass. - * @param object typed object. - * @return Codec for the typed object. - * @throws IOException - */ - private Codec getCodec(T object) throws IOException { - Class format = (Class) object.getClass(); - return getCodec(format); - } - - - /** - * Get codec for the typed object including class and subclass. - * @param Type of the typed object. - * @return Codec for the typed object. 
- * @throws IOException - */ - private Codec getCodec(Class format) throws IOException { - Codec codec; - if (valueCodecs.containsKey(format)) { - codec = (Codec) valueCodecs.get(format); - } else if (valueCodecs.containsKey(format.getSuperclass())) { - codec = (Codec) valueCodecs.get(format.getSuperclass()); - } else { - throw new IllegalStateException( - "Codec is not registered for type: " + format); - } - return codec; - } - - /** - * Addds codec to the internal collection. - * - * @param type Type of the codec source/destination object. - * @param codec The codec itself. - * @param The type of the codec - */ - public void addCodec(Class type, Codec codec) { - valueCodecs.put(type, codec); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBCheckpoint.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBCheckpoint.java deleted file mode 100644 index 6a45298cb6a3c..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBCheckpoint.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.nio.file.Path; - -/** - * Generic DB Checkpoint interface. - */ -public interface DBCheckpoint { - - /** - * Get Snapshot location. - */ - Path getCheckpointLocation(); - - /** - * Get Snapshot creation timestamp. - */ - long getCheckpointTimestamp(); - - /** - * Get last sequence number of Snapshot. - */ - long getLatestSequenceNumber(); - - /** - * Time taken in milliseconds for the checkpoint to be created. - */ - long checkpointCreationTimeTaken(); - - /** - * Destroy the contents of the specified checkpoint to ensure - * proper cleanup of the footprint on disk. - * - * @throws IOException if I/O error happens - */ - void cleanupCheckpoint() throws IOException; - - /** - * Set the OM Ratis snapshot index corresponding to the OM DB checkpoint. - * The snapshot index is the latest snapshot index saved by ratis - * snapshots. It is not guaranteed to be the last ratis index applied to - * the OM DB state. - * @param omRatisSnapshotIndex the saved ratis snapshot index - */ - void setRatisSnapshotIndex(long omRatisSnapshotIndex); - - /** - * Get the OM Ratis snapshot index corresponding to the OM DB checkpoint. - * The ratis snapshot index indicates upto which index is definitely - * included in the DB checkpoint. It is not guaranteed to be the last ratis - * log index applied to the DB checkpoint. 
- */ - long getRatisSnapshotIndex(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java deleted file mode 100644 index 43754255eabfd..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import com.google.common.base.Preconditions; -import org.eclipse.jetty.util.StringUtil; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.DBOptions; -import org.rocksdb.Env; -import org.rocksdb.OptionsUtil; -import org.rocksdb.RocksDBException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.List; - -/** - * A Class that controls the standard config options of RocksDB. - *
<p>
- * Important : Some of the functions in this file are magic functions designed - * for the use of OZONE developers only. Due to that this information is - * documented in this files only and is *not* intended for end user consumption. - * Please do not use this information to tune your production environments. - * Please remember the SpiderMan principal; with great power comes great - * responsibility. - */ -public final class DBConfigFromFile { - private static final Logger LOG = - LoggerFactory.getLogger(DBConfigFromFile.class); - - public static final String CONFIG_DIR = "HADOOP_CONF_DIR"; - - private DBConfigFromFile() { - } - - public static File getConfigLocation() throws IOException { - String path = System.getenv(CONFIG_DIR); - - // Make testing easy. - // If there is No Env. defined, let us try to read the JVM property - if (StringUtil.isBlank(path)) { - path = System.getProperty(CONFIG_DIR); - } - - if (StringUtil.isBlank(path)) { - LOG.debug("Unable to find the configuration directory. " - + "Please make sure that HADOOP_CONF_DIR is setup correctly."); - } - if(StringUtil.isBlank(path)){ - return null; - } - return new File(path); - - } - - /** - * This class establishes a magic pattern where we look for DBFile.ini as the - * options for RocksDB. - * - * @param dbFileName - The DBFile Name. For example, OzoneManager.db - * @return Name of the DB File options - */ - public static String getOptionsFileNameFromDB(String dbFileName) { - Preconditions.checkNotNull(dbFileName); - return dbFileName + ".ini"; - } - - /** - * One of the Magic functions designed for the use of Ozone Developers *ONLY*. - * This function takes the name of DB file and looks up the a .ini file that - * follows the ROCKSDB config format and uses that file for DBOptions and - * Column family Options. The Format for this file is specified by RockDB. - *
<p>
- * Here is a sample config from RocksDB sample Repo. - *
<p>
- * https://github.com/facebook/rocksdb/blob/master/examples - * /rocksdb_option_file_example.ini - *
<p>
- * We look for a specific pattern, say OzoneManager.db will have its configs - * specified in OzoneManager.db.ini. This option is used only by the - * performance testing group to allow tuning of all parameters freely. - *
<p>
- * For the end users we offer a set of Predefined options that is easy to use - * and the user does not need to become an expert in RockDB config. - *
<p>
- * This code assumes the .ini file is placed in the same directory as normal - * config files. That is in $HADOOP_DIR/etc/hadoop. For example, if we want to - * control OzoneManager.db configs from a file, we need to create a file - * called OzoneManager.db.ini and place that file in $HADOOP_DIR/etc/hadoop. - * - * @param dbFileName - The DB File Name, for example, OzoneManager.db. - * @param cfDescs - ColumnFamily Handles. - * @return DBOptions, Options to be used for opening/creating the DB. - * @throws IOException - */ - public static DBOptions readFromFile(String dbFileName, - List cfDescs) throws IOException { - Preconditions.checkNotNull(dbFileName); - Preconditions.checkNotNull(cfDescs); - Preconditions.checkArgument(cfDescs.size() > 0); - - //TODO: Add Documentation on how to support RocksDB Mem Env. - Env env = Env.getDefault(); - DBOptions options = null; - File configLocation = getConfigLocation(); - if(configLocation != null && - StringUtil.isNotBlank(configLocation.toString())){ - Path optionsFile = Paths.get(configLocation.toString(), - getOptionsFileNameFromDB(dbFileName)); - - if (optionsFile.toFile().exists()) { - options = new DBOptions(); - try { - OptionsUtil.loadOptionsFromFile(optionsFile.toString(), - env, options, cfDescs, true); - - } catch (RocksDBException rdEx) { - RDBTable.toIOException("Unable to find/open Options file.", rdEx); - } - } - } - return options; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java deleted file mode 100644 index 57516fd89a40b..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import org.apache.hadoop.conf.StorageUnit; -import org.rocksdb.BlockBasedTableConfig; -import org.rocksdb.BloomFilter; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.CompactionStyle; -import org.rocksdb.DBOptions; - -import java.math.BigDecimal; - -/** - * User visible configs based RocksDB tuning page. Documentation for Options. - *
<p>
- * https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h - *
<p>
- * Most tuning parameters are based on this URL. - *
<p>
- * https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning - */ -public enum DBProfile { - //TODO : Add more profiles like TEST etc. - SSD { - @Override - public String toString() { - return "DBProfile.SSD"; - } - - @Override - public ColumnFamilyOptions getColumnFamilyOptions() { - - // Set BlockCacheSize to 256 MB. This should not be an issue for HADOOP. - final long blockCacheSize = toLong(StorageUnit.MB.toBytes(256.00)); - - // Set the Default block size to 16KB - final long blockSize = toLong(StorageUnit.KB.toBytes(16)); - - // Write Buffer Size -- set to 128 MB - final long writeBufferSize = toLong(StorageUnit.MB.toBytes(128)); - - return new ColumnFamilyOptions() - .setLevelCompactionDynamicLevelBytes(true) - .setWriteBufferSize(writeBufferSize) - .setTableFormatConfig( - new BlockBasedTableConfig() - .setBlockCacheSize(blockCacheSize) - .setBlockSize(blockSize) - .setCacheIndexAndFilterBlocks(true) - .setPinL0FilterAndIndexBlocksInCache(true) - .setFilter(new BloomFilter())); - } - - @Override - public DBOptions getDBOptions() { - final int maxBackgroundCompactions = 4; - final int maxBackgroundFlushes = 2; - final long bytesPerSync = toLong(StorageUnit.MB.toBytes(1.00)); - final boolean createIfMissing = true; - final boolean createMissingColumnFamilies = true; - return new DBOptions() - .setIncreaseParallelism(Runtime.getRuntime().availableProcessors()) - .setMaxBackgroundCompactions(maxBackgroundCompactions) - .setMaxBackgroundFlushes(maxBackgroundFlushes) - .setBytesPerSync(bytesPerSync) - .setCreateIfMissing(createIfMissing) - .setCreateMissingColumnFamilies(createMissingColumnFamilies); - } - - - }, - DISK { - @Override - public String toString() { - return "DBProfile.DISK"; - } - - @Override - public DBOptions getDBOptions() { - final long readAheadSize = toLong(StorageUnit.MB.toBytes(4.00)); - return SSD.getDBOptions().setCompactionReadaheadSize(readAheadSize); - } - - @Override - public ColumnFamilyOptions getColumnFamilyOptions() { - ColumnFamilyOptions columnFamilyOptions = SSD.getColumnFamilyOptions(); - columnFamilyOptions.setCompactionStyle(CompactionStyle.LEVEL); - return columnFamilyOptions; - } - - - }; - - private static long toLong(double value) { - BigDecimal temp = new BigDecimal(value); - return temp.longValue(); - } - - public abstract DBOptions getDBOptions(); - - public abstract ColumnFamilyOptions getColumnFamilyOptions(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java deleted file mode 100644 index b3f58384203fc..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl; - -/** - * The DBStore interface provides the ability to create Tables, which store - * a specific type of Key-Value pair. Some DB interfaces like LevelDB will not - * be able to do this. In those case a Table creation will map to a default - * store. - * - */ -@InterfaceStability.Evolving -public interface DBStore extends AutoCloseable { - - /** - * Gets an existing TableStore. - * - * @param name - Name of the TableStore to get - * @return - TableStore. - * @throws IOException on Failure - */ - Table getTable(String name) throws IOException; - - - /** - * Gets an existing TableStore with implicit key/value conversion and - * with default cleanup policy for cache. Default cache clean up policy is - * manual. - * - * @param name - Name of the TableStore to get - * @param keyType - * @param valueType - * @return - TableStore. - * @throws IOException on Failure - */ - Table getTable(String name, - Class keyType, Class valueType) throws IOException; - - /** - * Gets an existing TableStore with implicit key/value conversion and - * with specified cleanup policy for cache. - * @throws IOException - */ - Table getTable(String name, - Class keyType, Class valueType, - TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException; - - /** - * Lists the Known list of Tables in a DB. - * - * @return List of Tables, in case of Rocks DB and LevelDB we will return at - * least one entry called DEFAULT. - * @throws IOException on Failure - */ - ArrayList listTables() throws IOException; - - /** - * Flush the DB buffer onto persistent storage. - * @throws IOException - */ - void flush() throws IOException; - - /** - * Compact the entire database. - * - * @throws IOException on Failure - */ - void compactDB() throws IOException; - - /** - * Moves a key from the Source Table to the destination Table. - * - * @param key - Key to move. - * @param source - Source Table. - * @param dest - Destination Table. - * @throws IOException on Failure - */ - void move(KEY key, Table source, - Table dest) throws IOException; - - /** - * Moves a key from the Source Table to the destination Table and updates the - * destination to the new value. - * - * @param key - Key to move. - * @param value - new value to write to the destination table. - * @param source - Source Table. - * @param dest - Destination Table. - * @throws IOException on Failure - */ - void move(KEY key, VALUE value, Table source, - Table dest) - throws IOException; - - /** - * Moves a key from the Source Table to the destination Table and updates the - * destination with the new key name and value. - * This is similar to deleting an entry in one table and adding an entry in - * another table, here it is done atomically. - * - * @param sourceKey - Key to move. - * @param destKey - Destination key name. - * @param value - new value to write to the destination table. - * @param source - Source Table. - * @param dest - Destination Table. - * @throws IOException on Failure - */ - void move(KEY sourceKey, KEY destKey, VALUE value, - Table source, Table dest) - throws IOException; - - /** - * Returns an estimated count of keys in this DB. 
- * - * @return long, estimate of keys in the DB. - */ - long getEstimatedKeyCount() throws IOException; - - /** - * Initialize an atomic batch operation which can hold multiple PUT/DELETE - * operations and committed later in one step. - * - * @return BatchOperation holder which can be used to add or commit batch - * operations. - */ - BatchOperation initBatchOperation(); - - /** - * Commit the batch operations. - * - * @param operation which contains all the required batch operation. - * @throws IOException on Failure. - */ - void commitBatchOperation(BatchOperation operation) throws IOException; - - /** - * Get current snapshot of OM DB store as an artifact stored on - * the local filesystem. - * @return An object that encapsulates the checkpoint information along with - * location. - */ - DBCheckpoint getCheckpoint(boolean flush) throws IOException; - - /** - * Get DB Store location. - * @return DB file location. - */ - File getDbLocation(); - - /** - * Get List of Index to Table Names. - * (For decoding table from column family index) - * @return Map of Index -> TableName - */ - Map getTableNames(); - - /** - * Get Codec registry. - * @return codec registry. - */ - CodecRegistry getCodecRegistry(); - - /** - * Get data written to DB since a specific sequence number. - * @param sequenceNumber - * @return - * @throws SequenceNumberNotFoundException - */ - DBUpdatesWrapper getUpdatesSince(long sequenceNumber) - throws SequenceNumberNotFoundException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java deleted file mode 100644 index 263864fede890..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.utils.db; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSUtil; -import org.eclipse.jetty.util.StringUtil; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.InfoLogLevel; -import org.rocksdb.RocksDB; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_DB_PROFILE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; - -/** - * DBStore Builder. - */ -public final class DBStoreBuilder { - private static final Logger LOG = - LoggerFactory.getLogger(DBStoreBuilder.class); - public static final Logger ROCKS_DB_LOGGER = - LoggerFactory.getLogger(RocksDB.class); - private Set tables; - private DBProfile dbProfile; - private DBOptions rocksDBOption; - private String dbname; - private Path dbPath; - private List tableNames; - private Configuration configuration; - private CodecRegistry registry; - private String rocksDbStat; - private RocksDBConfiguration rocksDBConfiguration; - - private DBStoreBuilder(OzoneConfiguration configuration) { - tables = new HashSet<>(); - tableNames = new LinkedList<>(); - this.configuration = configuration; - this.registry = new CodecRegistry(); - this.rocksDbStat = configuration.getTrimmed( - OZONE_METADATA_STORE_ROCKSDB_STATISTICS, - OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT); - this.rocksDBConfiguration = - configuration.getObject(RocksDBConfiguration.class); - } - - public static DBStoreBuilder newBuilder(OzoneConfiguration configuration) { - return new DBStoreBuilder(configuration); - } - - public DBStoreBuilder setProfile(DBProfile profile) { - dbProfile = profile; - return this; - } - - public DBStoreBuilder setName(String name) { - dbname = name; - return this; - } - - public DBStoreBuilder addTable(String tableName) { - tableNames.add(tableName); - return this; - } - - public DBStoreBuilder addCodec(Class type, Codec codec) { - registry.addCodec(type, codec); - return this; - } - - public DBStoreBuilder addTable(String tableName, ColumnFamilyOptions option) - throws IOException { - TableConfig tableConfig = new TableConfig(tableName, option); - if (!tables.add(tableConfig)) { - String message = "Unable to add the table: " + tableName + - ". Please check if this table name is already in use."; - LOG.error(message); - throw new IOException(message); - } - LOG.info("using custom profile for table: {}", tableName); - return this; - } - - public DBStoreBuilder setDBOption(DBOptions option) { - rocksDBOption = option; - return this; - } - - public DBStoreBuilder setPath(Path path) { - Preconditions.checkNotNull(path); - dbPath = path; - return this; - } - - /** - * Builds a DBStore instance and returns that. 
- * - * @return DBStore - */ - public DBStore build() throws IOException { - if(StringUtil.isBlank(dbname) || (dbPath == null)) { - LOG.error("Required Parameter missing."); - throw new IOException("Required parameter is missing. Please make sure " - + "sure Path and DB name is provided."); - } - processDBProfile(); - processTables(); - DBOptions options = getDbProfile(); - File dbFile = getDBFile(); - if (!dbFile.getParentFile().exists()) { - throw new IOException("The DB destination directory should exist."); - } - return new RDBStore(dbFile, options, tables, registry); - } - - /** - * if the DBProfile is not set, we will default to using default from the - * config file. - */ - private void processDBProfile() { - if (dbProfile == null) { - dbProfile = this.configuration.getEnum(HDDS_DB_PROFILE, - HDDS_DEFAULT_DB_PROFILE); - } - } - - private void processTables() throws IOException { - if (tableNames.size() > 0) { - for (String name : tableNames) { - addTable(name, dbProfile.getColumnFamilyOptions()); - LOG.info("Using default column profile:{} for Table:{}", - dbProfile.toString(), name); - } - } - addTable(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - dbProfile.getColumnFamilyOptions()); - LOG.info("Using default column profile:{} for Table:{}", - dbProfile.toString(), - DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY)); - } - - private DBOptions getDbProfile() { - if (rocksDBOption != null) { - return rocksDBOption; - } - DBOptions option = null; - if (StringUtil.isNotBlank(dbname)) { - List columnFamilyDescriptors = new LinkedList<>(); - - for (TableConfig tc : tables) { - columnFamilyDescriptors.add(tc.getDescriptor()); - } - - if (columnFamilyDescriptors.size() > 0) { - try { - option = DBConfigFromFile.readFromFile(dbname, - columnFamilyDescriptors); - if(option != null) { - LOG.info("Using Configs from {}.ini file", dbname); - } - } catch (IOException ex) { - LOG.info("Unable to read ROCKDB config", ex); - } - } - } - - if (option == null) { - LOG.info("Using default options. 
{}", dbProfile.toString()); - option = dbProfile.getDBOptions(); - } - - if (rocksDBConfiguration.isRocksdbLoggingEnabled()) { - org.rocksdb.Logger logger = new org.rocksdb.Logger(option) { - @Override - protected void log(InfoLogLevel infoLogLevel, String s) { - ROCKS_DB_LOGGER.info(s); - } - }; - InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration - .getRocksdbLogLevel() + "_LEVEL"); - logger.setInfoLogLevel(level); - option.setLogger(logger); - } - - if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) { - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat)); - option = option.setStatistics(statistics); - } - return option; - } - - private File getDBFile() throws IOException { - if (dbPath == null) { - LOG.error("DB path is required."); - throw new IOException("A Path to for DB file is needed."); - } - - if (StringUtil.isBlank(dbname)) { - LOG.error("DBName is a required."); - throw new IOException("A valid DB name is required."); - } - return Paths.get(dbPath.toString(), dbname).toFile(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java deleted file mode 100644 index aa48c5e83b0d8..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.util.ArrayList; -import java.util.List; - -/** - * Wrapper class to hold DB data read from the RocksDB log file. - */ -public class DBUpdatesWrapper { - - private List dataList = new ArrayList<>(); - private long currentSequenceNumber = -1; - - public void addWriteBatch(byte[] data, long sequenceNumber) { - dataList.add(data); - if (currentSequenceNumber < sequenceNumber) { - currentSequenceNumber = sequenceNumber; - } - } - - public List getData() { - return dataList; - } - - public void setCurrentSequenceNumber(long sequenceNumber) { - this.currentSequenceNumber = sequenceNumber; - } - - public long getCurrentSequenceNumber() { - return currentSequenceNumber; - } -} - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java deleted file mode 100644 index e95e0f1b757c9..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; - -import com.google.common.primitives.Ints; - -/** - * Codec to convert Integer to/from byte array. - */ -public class IntegerCodec implements Codec { - @Override - public byte[] toPersistedFormat(Integer object) throws IOException { - return Ints.toByteArray(object); - } - - @Override - public Integer fromPersistedFormat(byte[] rawData) throws IOException { - return Ints.fromByteArray(rawData); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java deleted file mode 100644 index 6c95246ebea72..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import com.google.common.primitives.Longs; - - -/** - * Codec to convert Long to/from byte array. - */ -public class LongCodec implements Codec { - - @Override - public byte[] toPersistedFormat(Long object) { - if (object != null) { - return Longs.toByteArray(object); - } else { - return null; - } - } - - @Override - public Long fromPersistedFormat(byte[] rawData) { - if (rawData != null) { - return Longs.fromByteArray(rawData); - } else { - return null; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java deleted file mode 100644 index 42843b080d738..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; - -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.WriteBatch; -import org.rocksdb.WriteOptions; - -/** - * Batch operation implementation for rocks db. - */ -public class RDBBatchOperation implements BatchOperation { - - private final WriteBatch writeBatch; - - public RDBBatchOperation() { - writeBatch = new WriteBatch(); - } - - public RDBBatchOperation(WriteBatch writeBatch) { - this.writeBatch = writeBatch; - } - - public void commit(RocksDB db, WriteOptions writeOptions) throws IOException { - try { - db.write(writeOptions, writeBatch); - } catch (RocksDBException e) { - throw new IOException("Unable to write the batch.", e); - } - } - - @Override - public void close() { - writeBatch.close(); - } - - public void delete(ColumnFamilyHandle handle, byte[] key) throws IOException { - try { - writeBatch.delete(handle, key); - } catch (RocksDBException e) { - throw new IOException("Can't record batch delete operation.", e); - } - } - - public void put(ColumnFamilyHandle handle, byte[] key, byte[] value) - throws IOException { - try { - writeBatch.put(handle, key, value); - } catch (RocksDBException e) { - throw new IOException("Can't record batch put operation.", e); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java deleted file mode 100644 index 42b9b77d2d874..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.nio.file.Path; -import java.nio.file.Paths; -import java.time.Duration; -import java.time.Instant; - -import org.apache.commons.lang3.StringUtils; -import org.rocksdb.Checkpoint; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * RocksDB Checkpoint Manager, used to create and cleanup checkpoints. 
- */ -public class RDBCheckpointManager { - - private final Checkpoint checkpoint; - private final RocksDB db; - public static final String RDB_CHECKPOINT_DIR_PREFIX = "rdb_checkpoint_"; - private static final Logger LOG = - LoggerFactory.getLogger(RDBCheckpointManager.class); - private String checkpointNamePrefix = ""; - - public RDBCheckpointManager(RocksDB rocksDB) { - this.db = rocksDB; - this.checkpoint = Checkpoint.create(rocksDB); - } - - /** - * Create a checkpoint manager with a prefix to be added to the - * snapshots created. - * - * @param rocksDB DB instance - * @param checkpointPrefix prefix string. - */ - public RDBCheckpointManager(RocksDB rocksDB, String checkpointPrefix) { - this.db = rocksDB; - this.checkpointNamePrefix = checkpointPrefix; - this.checkpoint = Checkpoint.create(rocksDB); - } - - /** - * Create RocksDB snapshot by saving a checkpoint to a directory. - * - * @param parentDir The directory where the checkpoint needs to be created. - * @return RocksDB specific Checkpoint information object. - */ - public RocksDBCheckpoint createCheckpoint(String parentDir) { - try { - long currentTime = System.currentTimeMillis(); - - String checkpointDir = StringUtils.EMPTY; - if (StringUtils.isNotEmpty(checkpointNamePrefix)) { - checkpointDir += checkpointNamePrefix; - } - checkpointDir += "_" + RDB_CHECKPOINT_DIR_PREFIX + currentTime; - - Path checkpointPath = Paths.get(parentDir, checkpointDir); - Instant start = Instant.now(); - checkpoint.createCheckpoint(checkpointPath.toString()); - Instant end = Instant.now(); - - long duration = Duration.between(start, end).toMillis(); - LOG.info("Created checkpoint at " + checkpointPath.toString() + " in " - + duration + " milliseconds"); - - return new RocksDBCheckpoint( - checkpointPath, - currentTime, - db.getLatestSequenceNumber(), //Best guesstimate here. Not accurate. - duration); - - } catch (RocksDBException e) { - LOG.error("Unable to create RocksDB Snapshot.", e); - } - return null; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java deleted file mode 100644 index 53bd424642afa..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java +++ /dev/null @@ -1,381 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.utils.db; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_CHECKPOINTS_DIR_NAME; - -import javax.management.ObjectName; -import java.io.File; -import java.io.IOException; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Hashtable; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.utils.RocksDBStoreMBean; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.metrics2.util.MBeans; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl; -import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.DBOptions; -import org.rocksdb.FlushOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.TransactionLogIterator; -import org.rocksdb.WriteOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * RocksDB Store that supports creating Tables in DB. - */ -public class RDBStore implements DBStore { - private static final Logger LOG = - LoggerFactory.getLogger(RDBStore.class); - private RocksDB db; - private File dbLocation; - private final WriteOptions writeOptions; - private final DBOptions dbOptions; - private final CodecRegistry codecRegistry; - private final Hashtable handleTable; - private ObjectName statMBeanName; - private RDBCheckpointManager checkPointManager; - private String checkpointsParentDir; - private List columnFamilyHandles; - - @VisibleForTesting - public RDBStore(File dbFile, DBOptions options, - Set families) throws IOException { - this(dbFile, options, families, new CodecRegistry()); - } - - public RDBStore(File dbFile, DBOptions options, Set families, - CodecRegistry registry) - throws IOException { - Preconditions.checkNotNull(dbFile, "DB file location cannot be null"); - Preconditions.checkNotNull(families); - Preconditions.checkArgument(families.size() > 0); - handleTable = new Hashtable<>(); - codecRegistry = registry; - final List columnFamilyDescriptors = - new ArrayList<>(); - columnFamilyHandles = new ArrayList<>(); - - for (TableConfig family : families) { - columnFamilyDescriptors.add(family.getDescriptor()); - } - - dbOptions = options; - dbLocation = dbFile; - // TODO: Read from the next Config. - writeOptions = new WriteOptions(); - - try { - db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath(), - columnFamilyDescriptors, columnFamilyHandles); - - for (int x = 0; x < columnFamilyHandles.size(); x++) { - handleTable.put( - DFSUtil.bytes2String(columnFamilyHandles.get(x).getName()), - columnFamilyHandles.get(x)); - } - - if (dbOptions.statistics() != null) { - Map jmxProperties = new HashMap<>(); - jmxProperties.put("dbName", dbFile.getName()); - statMBeanName = HddsUtils.registerWithJmxProperties( - "Ozone", "RocksDbStore", jmxProperties, - RocksDBStoreMBean.create(dbOptions.statistics(), - dbFile.getName())); - if (statMBeanName == null) { - LOG.warn("jmx registration failed during RocksDB init, db path :{}", - dbFile.getAbsolutePath()); - } - } - - //create checkpoints directory if not exists. 
- checkpointsParentDir = Paths.get(dbLocation.getParent(), - OM_DB_CHECKPOINTS_DIR_NAME).toString(); - File checkpointsDir = new File(checkpointsParentDir); - if (!checkpointsDir.exists()) { - boolean success = checkpointsDir.mkdir(); - if (!success) { - LOG.warn("Unable to create RocksDB checkpoint directory"); - } - } - - //Initialize checkpoint manager - checkPointManager = new RDBCheckpointManager(db, "om"); - - } catch (RocksDBException e) { - throw toIOException( - "Failed init RocksDB, db path : " + dbFile.getAbsolutePath(), e); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("RocksDB successfully opened."); - LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath()); - LOG.debug("[Option] createIfMissing = {}", options.createIfMissing()); - LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles()); - } - } - - public static IOException toIOException(String msg, RocksDBException e) { - String statusCode = e.getStatus() == null ? "N/A" : - e.getStatus().getCodeString(); - String errMessage = e.getMessage() == null ? "Unknown error" : - e.getMessage(); - String output = msg + "; status : " + statusCode - + "; message : " + errMessage; - return new IOException(output, e); - } - - @Override - public void compactDB() throws IOException { - if (db != null) { - try { - db.compactRange(); - } catch (RocksDBException e) { - throw toIOException("Failed to compact db", e); - } - } - } - - @Override - public void close() throws IOException { - - for (final ColumnFamilyHandle handle : handleTable.values()) { - handle.close(); - } - - if (statMBeanName != null) { - MBeans.unregister(statMBeanName); - statMBeanName = null; - } - - if (db != null) { - db.close(); - } - - if (dbOptions != null) { - dbOptions.close(); - } - - if (writeOptions != null) { - writeOptions.close(); - } - } - - @Override - public void move(KEY key, Table source, - Table dest) throws IOException { - try (BatchOperation batchOperation = initBatchOperation()) { - - VALUE value = source.get(key); - dest.putWithBatch(batchOperation, key, value); - source.deleteWithBatch(batchOperation, key); - commitBatchOperation(batchOperation); - } - } - - @Override - public void move(KEY key, VALUE value, Table source, - Table dest) throws IOException { - move(key, key, value, source, dest); - } - - @Override - public void move(KEY sourceKey, KEY destKey, VALUE value, - Table source, - Table dest) throws IOException { - try (BatchOperation batchOperation = initBatchOperation()) { - dest.putWithBatch(batchOperation, destKey, value); - source.deleteWithBatch(batchOperation, sourceKey); - commitBatchOperation(batchOperation); - } - } - - @Override - public long getEstimatedKeyCount() throws IOException { - try { - return db.getLongProperty("rocksdb.estimate-num-keys"); - } catch (RocksDBException e) { - throw toIOException("Unable to get the estimated count.", e); - } - } - - @Override - public BatchOperation initBatchOperation() { - return new RDBBatchOperation(); - } - - @Override - public void commitBatchOperation(BatchOperation operation) - throws IOException { - ((RDBBatchOperation) operation).commit(db, writeOptions); - } - - - @VisibleForTesting - protected ObjectName getStatMBeanName() { - return statMBeanName; - } - - @Override - public Table getTable(String name) throws IOException { - ColumnFamilyHandle handle = handleTable.get(name); - if (handle == null) { - throw new IOException("No such table in this DB. 
TableName : " + name); - } - return new RDBTable(this.db, handle, this.writeOptions); - } - - @Override - public Table getTable(String name, - Class keyType, Class valueType) throws IOException { - return new TypedTable(getTable(name), codecRegistry, keyType, - valueType); - } - - @Override - public Table getTable(String name, - Class keyType, Class valueType, - TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException { - return new TypedTable(getTable(name), codecRegistry, keyType, - valueType, cleanupPolicy); - } - - @Override - public ArrayList
<Table> listTables() throws IOException { - ArrayList<Table>
returnList = new ArrayList<>(); - for (ColumnFamilyHandle handle : handleTable.values()) { - returnList.add(new RDBTable(db, handle, writeOptions)); - } - return returnList; - } - - @Override - public void flush() throws IOException { - final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true); - try { - db.flush(flushOptions); - } catch (RocksDBException e) { - LOG.error("Unable to Flush RocksDB data", e); - throw toIOException("Unable to Flush RocksDB data", e); - } - } - - @Override - public DBCheckpoint getCheckpoint(boolean flush) { - final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(flush); - try { - db.flush(flushOptions); - } catch (RocksDBException e) { - LOG.error("Unable to Flush RocksDB data before creating snapshot", e); - } - return checkPointManager.createCheckpoint(checkpointsParentDir); - } - - @Override - public File getDbLocation() { - return dbLocation; - } - - @Override - public Map getTableNames() { - Map tableNames = new HashMap<>(); - StringCodec stringCodec = new StringCodec(); - - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { - try { - tableNames.put(columnFamilyHandle.getID(), stringCodec - .fromPersistedFormat(columnFamilyHandle.getName())); - } catch (RocksDBException | IOException e) { - LOG.error("Unexpected exception while reading column family handle " + - "name", e); - } - } - return tableNames; - } - - @Override - public CodecRegistry getCodecRegistry() { - return codecRegistry; - } - - @Override - public DBUpdatesWrapper getUpdatesSince(long sequenceNumber) - throws SequenceNumberNotFoundException { - - DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper(); - try { - TransactionLogIterator transactionLogIterator = - db.getUpdatesSince(sequenceNumber); - - // Only the first record needs to be checked if its seq number < - // ( 1 + passed_in_sequence_number). For example, if seqNumber passed - // in is 100, then we can read from the WAL ONLY if the first sequence - // number is <= 101. If it is 102, then 101 may already be flushed to - // SST. If it 99, we can skip 99 and 100, and then read from 101. - - boolean checkValidStartingSeqNumber = true; - - while (transactionLogIterator.isValid()) { - TransactionLogIterator.BatchResult result = - transactionLogIterator.getBatch(); - long currSequenceNumber = result.sequenceNumber(); - if (checkValidStartingSeqNumber && - currSequenceNumber > 1 + sequenceNumber) { - throw new SequenceNumberNotFoundException("Unable to read data from" + - " RocksDB wal to get delta updates. It may have already been" + - "flushed to SSTs."); - } - // If the above condition was not satisfied, then it is OK to reset - // the flag. 
- checkValidStartingSeqNumber = false; - if (currSequenceNumber <= sequenceNumber) { - transactionLogIterator.next(); - continue; - } - dbUpdatesWrapper.addWriteBatch(result.writeBatch().data(), - result.sequenceNumber()); - transactionLogIterator.next(); - } - } catch (RocksDBException e) { - LOG.error("Unable to get delta updates since sequenceNumber {} ", - sequenceNumber, e); - } - return dbUpdatesWrapper; - } - - @VisibleForTesting - public RocksDB getDb() { - return db; - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java deleted file mode 100644 index 784738b0cec28..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.util.NoSuchElementException; -import java.util.function.Consumer; - -import org.rocksdb.RocksIterator; - -/** - * RocksDB store iterator. 
- */ -public class RDBStoreIterator - implements TableIterator { - - private RocksIterator rocksDBIterator; - - public RDBStoreIterator(RocksIterator iterator) { - this.rocksDBIterator = iterator; - rocksDBIterator.seekToFirst(); - } - - @Override - public void forEachRemaining( - Consumer action) { - while (hasNext()) { - action.accept(next()); - } - } - - @Override - public boolean hasNext() { - return rocksDBIterator.isValid(); - } - - @Override - public ByteArrayKeyValue next() { - if (rocksDBIterator.isValid()) { - ByteArrayKeyValue value = - ByteArrayKeyValue.create(rocksDBIterator.key(), rocksDBIterator - .value()); - rocksDBIterator.next(); - return value; - } - throw new NoSuchElementException("RocksDB Store has no more elements"); - } - - @Override - public void seekToFirst() { - rocksDBIterator.seekToFirst(); - } - - @Override - public void seekToLast() { - rocksDBIterator.seekToLast(); - } - - @Override - public ByteArrayKeyValue seek(byte[] key) { - rocksDBIterator.seek(key); - if (rocksDBIterator.isValid()) { - return ByteArrayKeyValue.create(rocksDBIterator.key(), - rocksDBIterator.value()); - } - return null; - } - - @Override - public byte[] key() { - if (rocksDBIterator.isValid()) { - return rocksDBIterator.key(); - } - return null; - } - - @Override - public ByteArrayKeyValue value() { - if (rocksDBIterator.isValid()) { - return ByteArrayKeyValue.create(rocksDBIterator.key(), - rocksDBIterator.value()); - } - return null; - } - - @Override - public void close() throws IOException { - rocksDBIterator.close(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java deleted file mode 100644 index 49ccc020922fa..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.DFSUtil; - -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.ReadOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.WriteOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * RocksDB implementation of ozone metadata store. This class should be only - * used as part of TypedTable as it's underlying implementation to access the - * metadata store content. All other user's using Table should use TypedTable. 
- */ -@InterfaceAudience.Private -class RDBTable implements Table { - - - private static final Logger LOG = - LoggerFactory.getLogger(RDBTable.class); - - private final RocksDB db; - private final ColumnFamilyHandle handle; - private final WriteOptions writeOptions; - - /** - * Constructs a TableStore. - * - * @param db - DBstore that we are using. - * @param handle - ColumnFamily Handle. - * @param writeOptions - RocksDB write Options. - */ - RDBTable(RocksDB db, ColumnFamilyHandle handle, - WriteOptions writeOptions) { - this.db = db; - this.handle = handle; - this.writeOptions = writeOptions; - } - - /** - * Converts RocksDB exception to IOE. - * @param msg - Message to add to exception. - * @param e - Original Exception. - * @return IOE. - */ - public static IOException toIOException(String msg, RocksDBException e) { - String statusCode = e.getStatus() == null ? "N/A" : - e.getStatus().getCodeString(); - String errMessage = e.getMessage() == null ? "Unknown error" : - e.getMessage(); - String output = msg + "; status : " + statusCode - + "; message : " + errMessage; - return new IOException(output, e); - } - - /** - * Returns the Column family Handle. - * - * @return ColumnFamilyHandle. - */ - public ColumnFamilyHandle getHandle() { - return handle; - } - - @Override - public void put(byte[] key, byte[] value) throws IOException { - try { - db.put(handle, writeOptions, key, value); - } catch (RocksDBException e) { - LOG.error("Failed to write to DB. Key: {}", new String(key, - StandardCharsets.UTF_8)); - throw toIOException("Failed to put key-value to metadata " - + "store", e); - } - } - - @Override - public void putWithBatch(BatchOperation batch, byte[] key, byte[] value) - throws IOException { - if (batch instanceof RDBBatchOperation) { - ((RDBBatchOperation) batch).put(getHandle(), key, value); - } else { - throw new IllegalArgumentException("batch should be RDBBatchOperation"); - } - } - - - @Override - public boolean isEmpty() throws IOException { - try (TableIterator keyIter = iterator()) { - keyIter.seekToFirst(); - return !keyIter.hasNext(); - } - } - - @Override - public boolean isExist(byte[] key) throws IOException { - try { - // RocksDB#keyMayExist - // If the key definitely does not exist in the database, then this - // method returns false, else true. - return db.keyMayExist(handle, key, new StringBuilder()) - && db.get(handle, key) != null; - } catch (RocksDBException e) { - throw toIOException( - "Error in accessing DB. 
", e); - } - } - - @Override - public byte[] get(byte[] key) throws IOException { - try { - return db.get(handle, key); - } catch (RocksDBException e) { - throw toIOException( - "Failed to get the value for the given key", e); - } - } - - @Override - public void delete(byte[] key) throws IOException { - try { - db.delete(handle, key); - } catch (RocksDBException e) { - throw toIOException("Failed to delete the given key", e); - } - } - - @Override - public void deleteWithBatch(BatchOperation batch, byte[] key) - throws IOException { - if (batch instanceof RDBBatchOperation) { - ((RDBBatchOperation) batch).delete(getHandle(), key); - } else { - throw new IllegalArgumentException("batch should be RDBBatchOperation"); - } - - } - - @Override - public TableIterator iterator() { - ReadOptions readOptions = new ReadOptions(); - readOptions.setFillCache(false); - return new RDBStoreIterator(db.newIterator(handle, readOptions)); - } - - @Override - public String getName() throws IOException { - try { - return DFSUtil.bytes2String(this.getHandle().getName()); - } catch (RocksDBException rdbEx) { - throw toIOException("Unable to get the table name.", rdbEx); - } - } - - @Override - public void close() throws Exception { - // Nothing do for a Column Family. - } - - @Override - public long getEstimatedKeyCount() throws IOException { - try { - return db.getLongProperty(handle, "rocksdb.estimate-num-keys"); - } catch (RocksDBException e) { - throw toIOException( - "Failed to get estimated key count of table " + getName(), e); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java deleted file mode 100644 index 149743816c203..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.nio.file.Path; - -import org.apache.commons.io.FileUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class to hold information and location of a RocksDB Checkpoint. 
- */ -public class RocksDBCheckpoint implements DBCheckpoint { - - private static final Logger LOG = - LoggerFactory.getLogger(RocksDBCheckpoint.class); - - private Path checkpointLocation; - private long checkpointTimestamp = System.currentTimeMillis(); - private long latestSequenceNumber = -1; - private long checkpointCreationTimeTaken = 0L; - private long ratisSnapshotIndex = 0L; - - public RocksDBCheckpoint(Path checkpointLocation) { - this.checkpointLocation = checkpointLocation; - } - - public RocksDBCheckpoint(Path checkpointLocation, - long snapshotTimestamp, - long latestSequenceNumber, - long checkpointCreationTimeTaken) { - this.checkpointLocation = checkpointLocation; - this.checkpointTimestamp = snapshotTimestamp; - this.latestSequenceNumber = latestSequenceNumber; - this.checkpointCreationTimeTaken = checkpointCreationTimeTaken; - } - - @Override - public Path getCheckpointLocation() { - return this.checkpointLocation; - } - - @Override - public long getCheckpointTimestamp() { - return this.checkpointTimestamp; - } - - @Override - public long getLatestSequenceNumber() { - return this.latestSequenceNumber; - } - - @Override - public long checkpointCreationTimeTaken() { - return checkpointCreationTimeTaken; - } - - @Override - public void cleanupCheckpoint() throws IOException { - LOG.info("Cleaning up RocksDB checkpoint at " + - checkpointLocation.toString()); - FileUtils.deleteDirectory(checkpointLocation.toFile()); - } - - @Override - public void setRatisSnapshotIndex(long omRatisSnapshotIndex) { - this.ratisSnapshotIndex = omRatisSnapshotIndex; - } - - @Override - public long getRatisSnapshotIndex() { - return ratisSnapshotIndex; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java deleted file mode 100644 index 1a8c846a3eb21..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db; - -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigTag; -import org.apache.hadoop.hdds.conf.ConfigType; - -/** - * Holds configuration items for OM RocksDB. 
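For context on how RocksDBCheckpoint above is consumed: RDBStore#getCheckpoint (shown earlier in this patch) hands the caller a DBCheckpoint that the caller is expected to clean up once it has been used. A minimal sketch, assuming the DBStore and DBCheckpoint interfaces that accompany RDBStore in the same package (they are not part of this hunk), with an illustrative helper name:

    import java.nio.file.Path;
    import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
    import org.apache.hadoop.hdds.utils.db.DBStore;

    final class CheckpointSketch {
      // Takes a flushed checkpoint, reads its location (e.g. to tar it up for
      // a follower), then deletes the checkpoint directory.
      static void checkpointAndCleanup(DBStore store) throws Exception {
        DBCheckpoint checkpoint = store.getCheckpoint(true); // flush WAL first
        try {
          Path location = checkpoint.getCheckpointLocation();
          long seq = checkpoint.getLatestSequenceNumber();
          System.out.println("Checkpoint at " + location + ", seq " + seq);
        } finally {
          checkpoint.cleanupCheckpoint(); // removes the checkpoint directory
        }
      }
    }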
- */ -@ConfigGroup(prefix = "hadoop.hdds.db") -public class RocksDBConfiguration { - - private boolean rocksdbLogEnabled; - - @Config(key = "rocksdb.logging.enabled", - type = ConfigType.BOOLEAN, - defaultValue = "false", - tags = {ConfigTag.OM}, - description = "Enable/Disable RocksDB logging for OM.") - public void setRocksdbLoggingEnabled(boolean enabled) { - this.rocksdbLogEnabled = enabled; - } - - public boolean isRocksdbLoggingEnabled() { - return rocksdbLogEnabled; - } - - private String rocksdbLogLevel; - - @Config(key = "rocksdb.logging.level", - type = ConfigType.STRING, - defaultValue = "INFO", - tags = {ConfigTag.OM}, - description = "OM RocksDB logging level (INFO/DEBUG/WARN/ERROR/FATAL)") - public void setRocksdbLogLevel(String level) { - this.rocksdbLogLevel = level; - } - - public String getRocksdbLogLevel() { - return rocksdbLogLevel; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/SequenceNumberNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/SequenceNumberNotFoundException.java deleted file mode 100644 index e9b4fa391ecf2..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/SequenceNumberNotFoundException.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; - -/** - * Thrown if RocksDB is unable to find requested data from WAL file. - */ -public class SequenceNumberNotFoundException extends IOException { - - public SequenceNumberNotFoundException() { - super(); - } - - public SequenceNumberNotFoundException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java deleted file mode 100644 index f8237367c7245..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import org.apache.hadoop.hdfs.DFSUtil; - -/** - * Codec to convert String to/from byte array. - */ -public class StringCodec implements Codec { - - @Override - public byte[] toPersistedFormat(String object) throws IOException { - if (object != null) { - return DFSUtil.string2Bytes(object); - } else { - return null; - } - } - - @Override - public String fromPersistedFormat(byte[] rawData) throws IOException { - if (rawData != null) { - return DFSUtil.bytes2String(rawData); - } else { - return null; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java deleted file mode 100644 index 0502541e9c5c7..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.util.Iterator; -import java.util.Map; - -import org.apache.commons.lang3.NotImplementedException; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -/** - * Interface for key-value store that stores ozone metadata. Ozone metadata is - * stored as key value pairs, both key and value are arbitrary byte arrays. Each - * Table Stores a certain kind of keys and values. This allows a DB to have - * different kind of tables. - */ -@InterfaceStability.Evolving -public interface Table extends AutoCloseable { - - /** - * Puts a key-value pair into the store. - * - * @param key metadata key - * @param value metadata value - */ - void put(KEY key, VALUE value) throws IOException; - - /** - * Puts a key-value pair into the store as part of a bath operation. - * - * @param batch the batch operation - * @param key metadata key - * @param value metadata value - */ - void putWithBatch(BatchOperation batch, KEY key, VALUE value) - throws IOException; - - /** - * @return true if the metadata store is empty. - * @throws IOException on Failure - */ - boolean isEmpty() throws IOException; - - /** - * Check if a given key exists in Metadata store. - * (Optimization to save on data deserialization) - * A lock on the key / bucket needs to be acquired before invoking this API. - * @param key metadata key - * @return true if the metadata store contains a key. 
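The isExist contract above is an existence probe that skips value deserialization; the key/bucket lock mentioned in its javadoc is assumed to be held by the caller. A minimal sketch of the intended pattern, with an illustrative helper name:

    import java.io.IOException;
    import org.apache.hadoop.hdds.utils.db.Table;

    final class ExistsBeforeGetSketch {
      // Membership check without paying for value deserialization; the caller
      // holds the relevant key/bucket lock so the key cannot change between
      // the two calls.
      static <K, V> V getIfPresent(Table<K, V> table, K key) throws IOException {
        if (!table.isExist(key)) {   // keyMayExist + raw lookup, no decoding
          return null;
        }
        return table.get(key);       // only now deserialize the value
      }
    }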
- * @throws IOException on Failure - */ - boolean isExist(KEY key) throws IOException; - - /** - * Returns the value mapped to the given key in byte array or returns null - * if the key is not found. - * - * @param key metadata key - * @return value in byte array or null if the key is not found. - * @throws IOException on Failure - */ - VALUE get(KEY key) throws IOException; - - /** - * Deletes a key from the metadata store. - * - * @param key metadata key - * @throws IOException on Failure - */ - void delete(KEY key) throws IOException; - - /** - * Deletes a key from the metadata store as part of a batch operation. - * - * @param batch the batch operation - * @param key metadata key - * @throws IOException on Failure - */ - void deleteWithBatch(BatchOperation batch, KEY key) throws IOException; - - /** - * Returns the iterator for this metadata store. - * - * @return MetaStoreIterator - */ - TableIterator> iterator(); - - /** - * Returns the Name of this Table. - * @return - Table Name. - * @throws IOException on failure. - */ - String getName() throws IOException; - - /** - * Returns the key count of this Table. Note the result can be inaccurate. - * @return Estimated key count of this Table - * @throws IOException on failure - */ - long getEstimatedKeyCount() throws IOException; - - /** - * Add entry to the table cache. - * - * If the cacheKey already exists, it will override the entry. - * @param cacheKey - * @param cacheValue - */ - default void addCacheEntry(CacheKey cacheKey, - CacheValue cacheValue) { - throw new NotImplementedException("addCacheEntry is not implemented"); - } - - /** - * Get the cache value from table cache. - * @param cacheKey - */ - default CacheValue getCacheValue(CacheKey cacheKey) { - throw new NotImplementedException("getCacheValue is not implemented"); - } - - /** - * Removes all the entries from the table cache which are having epoch value - * less - * than or equal to specified epoch value. - * @param epoch - */ - default void cleanupCache(long epoch) { - throw new NotImplementedException("cleanupCache is not implemented"); - } - - /** - * Return cache iterator maintained for this table. - */ - default Iterator, CacheValue>> - cacheIterator() { - throw new NotImplementedException("cacheIterator is not implemented"); - } - - /** - * Class used to represent the key and value pair of a db entry. - */ - interface KeyValue { - - KEY getKey() throws IOException; - - VALUE getValue() throws IOException; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java deleted file mode 100644 index d8eb401659edb..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdfs.DFSUtil; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyOptions; - -/** - * Class that maintains Table Configuration. - */ -public class TableConfig { - private final String name; - private final ColumnFamilyOptions columnFamilyOptions; - - - /** - * Constructs a Table Config. - * @param name - Name of the Table. - * @param columnFamilyOptions - Column Family options. - */ - public TableConfig(String name, ColumnFamilyOptions columnFamilyOptions) { - this.name = name; - this.columnFamilyOptions = columnFamilyOptions; - } - - /** - * Returns the Name for this Table. - * @return - Name String - */ - public String getName() { - return name; - } - - /** - * Returns a ColumnFamilyDescriptor for this table. - * @return ColumnFamilyDescriptor - */ - public ColumnFamilyDescriptor getDescriptor() { - return new ColumnFamilyDescriptor(DFSUtil.string2Bytes(name), - columnFamilyOptions); - } - - /** - * Returns Column family options for this Table. - * @return ColumnFamilyOptions used for the Table. - */ - public ColumnFamilyOptions getColumnFamilyOptions() { - return columnFamilyOptions; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - TableConfig that = (TableConfig) o; - return new EqualsBuilder() - .append(getName(), that.getName()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(getName()) - .toHashCode(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java deleted file mode 100644 index a684157a43b11..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Iterator; - -/** - * Iterator for MetaDataStore DB. - * - * @param - */ -public interface TableIterator extends Iterator, Closeable { - - /** - * seek to first entry. - */ - void seekToFirst(); - - /** - * seek to last entry. - */ - void seekToLast(); - - /** - * Seek to the specific key. - * - * @param key - Bytes that represent the key. - * @return VALUE. 
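Combining Table#iterator with TableIterator#seek, a minimal scan-from-a-key sketch; generic type parameters are written out here as declared in the original interfaces, and the helper name is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
    import org.apache.hadoop.hdds.utils.db.TableIterator;

    final class IteratorSketch {
      static <K, V> void scanFrom(Table<K, V> table, K startKey)
          throws IOException {
        try (TableIterator<K, ? extends KeyValue<K, V>> it = table.iterator()) {
          // seek() positions the cursor at the first entry at or after startKey
          // (RocksDB seek semantics) without consuming it; next() starts there.
          if (it.seek(startKey) == null) {
            return;                          // nothing at or after startKey
          }
          while (it.hasNext()) {
            KeyValue<K, V> kv = it.next();
            System.out.println(kv.getKey() + " -> " + kv.getValue());
          }
        }
      }
    }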
- */ - T seek(KEY key) throws IOException; - - /** - * Returns the key value at the current position. - * @return KEY - */ - KEY key() throws IOException; - - /** - * Returns the VALUE at the current position. - * @return VALUE - */ - T value(); - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java deleted file mode 100644 index 597eff1f658fe..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.util.Iterator; -import java.util.Map; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Optional; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheResult; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl; -import org.apache.hadoop.hdds.utils.db.cache.TableCache; -import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl.CacheCleanupPolicy; - -import static org.apache.hadoop.hdds.utils.db.cache.CacheResult.CacheStatus.EXISTS; -import static org.apache.hadoop.hdds.utils.db.cache.CacheResult.CacheStatus.NOT_EXIST; -/** - * Strongly typed table implementation. - *

- * Automatically converts values and keys using a raw byte[] based table - * implementation and registered converters. - * - * @param type of the keys in the store. - * @param type of the values in the store. - */ -public class TypedTable implements Table { - - private final Table rawTable; - - private final CodecRegistry codecRegistry; - - private final Class keyType; - - private final Class valueType; - - private final TableCache, CacheValue> cache; - - private final static long EPOCH_DEFAULT = -1L; - - /** - * Create an TypedTable from the raw table. - * Default cleanup policy used for the table is - * {@link CacheCleanupPolicy#MANUAL}. - * @param rawTable - * @param codecRegistry - * @param keyType - * @param valueType - */ - public TypedTable( - Table rawTable, - CodecRegistry codecRegistry, Class keyType, - Class valueType) throws IOException { - this(rawTable, codecRegistry, keyType, valueType, - CacheCleanupPolicy.MANUAL); - } - - /** - * Create an TypedTable from the raw table with specified cleanup policy - * for table cache. - * @param rawTable - * @param codecRegistry - * @param keyType - * @param valueType - * @param cleanupPolicy - */ - public TypedTable( - Table rawTable, - CodecRegistry codecRegistry, Class keyType, - Class valueType, - TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException { - this.rawTable = rawTable; - this.codecRegistry = codecRegistry; - this.keyType = keyType; - this.valueType = valueType; - cache = new TableCacheImpl<>(cleanupPolicy); - - if (cleanupPolicy == CacheCleanupPolicy.NEVER) { - //fill cache - try(TableIterator> tableIterator = - iterator()) { - - while (tableIterator.hasNext()) { - KeyValue< KEY, VALUE > kv = tableIterator.next(); - - // We should build cache after OM restart when clean up policy is - // NEVER. Setting epoch value -1, so that when it is marked for - // delete, this will be considered for cleanup. - cache.loadInitial(new CacheKey<>(kv.getKey()), - new CacheValue<>(Optional.of(kv.getValue()), EPOCH_DEFAULT)); - } - } - } - } - - @Override - public void put(KEY key, VALUE value) throws IOException { - byte[] keyData = codecRegistry.asRawData(key); - byte[] valueData = codecRegistry.asRawData(value); - rawTable.put(keyData, valueData); - } - - @Override - public void putWithBatch(BatchOperation batch, KEY key, VALUE value) - throws IOException { - byte[] keyData = codecRegistry.asRawData(key); - byte[] valueData = codecRegistry.asRawData(value); - rawTable.putWithBatch(batch, keyData, valueData); - } - - @Override - public boolean isEmpty() throws IOException { - return rawTable.isEmpty(); - } - - @Override - public boolean isExist(KEY key) throws IOException { - - CacheResult> cacheResult = - cache.lookup(new CacheKey<>(key)); - - if (cacheResult.getCacheStatus() == EXISTS) { - return true; - } else if (cacheResult.getCacheStatus() == NOT_EXIST) { - return false; - } else { - return rawTable.isExist(codecRegistry.asRawData(key)); - } - } - - /** - * Returns the value mapped to the given key in byte array or returns null - * if the key is not found. - * - * Caller's of this method should use synchronization mechanism, when - * accessing. First it will check from cache, if it has entry return the - * value, otherwise get from the RocksDB table. - * - * @param key metadata key - * @return VALUE - * @throws IOException - */ - @Override - public VALUE get(KEY key) throws IOException { - // Here the metadata lock will guarantee that cache is not updated for same - // key during get key. 
- - CacheResult> cacheResult = - cache.lookup(new CacheKey<>(key)); - - if (cacheResult.getCacheStatus() == EXISTS) { - return cacheResult.getValue().getCacheValue(); - } else if (cacheResult.getCacheStatus() == NOT_EXIST) { - return null; - } else { - return getFromTable(key); - } - } - - private VALUE getFromTable(KEY key) throws IOException { - byte[] keyBytes = codecRegistry.asRawData(key); - byte[] valueBytes = rawTable.get(keyBytes); - return codecRegistry.asObject(valueBytes, valueType); - } - - @Override - public void delete(KEY key) throws IOException { - rawTable.delete(codecRegistry.asRawData(key)); - } - - @Override - public void deleteWithBatch(BatchOperation batch, KEY key) - throws IOException { - rawTable.deleteWithBatch(batch, codecRegistry.asRawData(key)); - - } - - @Override - public TableIterator iterator() { - TableIterator> iterator = - rawTable.iterator(); - return new TypedTableIterator(iterator, keyType, valueType); - } - - @Override - public String getName() throws IOException { - return rawTable.getName(); - } - - @Override - public long getEstimatedKeyCount() throws IOException { - return rawTable.getEstimatedKeyCount(); - } - - @Override - public void close() throws Exception { - rawTable.close(); - - } - - @Override - public void addCacheEntry(CacheKey cacheKey, - CacheValue cacheValue) { - // This will override the entry if there is already entry for this key. - cache.put(cacheKey, cacheValue); - } - - @Override - public CacheValue getCacheValue(CacheKey cacheKey) { - return cache.get(cacheKey); - } - - public Iterator, CacheValue>> cacheIterator() { - return cache.iterator(); - } - - @Override - public void cleanupCache(long epoch) { - cache.cleanup(epoch); - } - - @VisibleForTesting - TableCache, CacheValue> getCache() { - return cache; - } - - public Table getRawTable() { - return rawTable; - } - - public CodecRegistry getCodecRegistry() { - return codecRegistry; - } - - public Class getKeyType() { - return keyType; - } - - public Class getValueType() { - return valueType; - } - - /** - * Key value implementation for strongly typed tables. - */ - public class TypedKeyValue implements KeyValue { - - private KeyValue rawKeyValue; - - public TypedKeyValue(KeyValue rawKeyValue) { - this.rawKeyValue = rawKeyValue; - } - - public TypedKeyValue(KeyValue rawKeyValue, - Class keyType, Class valueType) { - this.rawKeyValue = rawKeyValue; - } - - @Override - public KEY getKey() throws IOException { - return codecRegistry.asObject(rawKeyValue.getKey(), keyType); - } - - @Override - public VALUE getValue() throws IOException { - return codecRegistry.asObject(rawKeyValue.getValue(), valueType); - } - } - - /** - * Table Iterator implementation for strongly typed tables. 
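TypedTable above is the cache-aware, strongly typed layer that Ozone Manager code is expected to use. A minimal sketch of the write-then-read-your-own-write pattern it enables, with illustrative names; the table is assumed to already exist and to actually be a TypedTable, since the raw byte[] tables leave the cache methods unimplemented:

    import com.google.common.base.Optional;
    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
    import org.apache.hadoop.hdds.utils.db.cache.CacheValue;

    final class CacheFirstWriteSketch {
      // OM-style write: the new value goes into the table cache tagged with the
      // Ratis log index of the transaction; readers see it immediately through
      // Table#get even though the RocksDB write is flushed later in a batch.
      static void applyCreate(Table<String, String> table, String key,
          String value, long logIndex) {
        table.addCacheEntry(new CacheKey<>(key),
            new CacheValue<>(Optional.of(value), logIndex));
        // table.get(key) now returns the cached value (CacheStatus EXISTS)
        // until cleanupCache(logIndex) runs after the batch reaches RocksDB.
      }
    }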
- */ - public class TypedTableIterator implements TableIterator { - - private TableIterator> - rawIterator; - private final Class keyClass; - private final Class valueClass; - - public TypedTableIterator( - TableIterator> rawIterator, - Class keyType, - Class valueType) { - this.rawIterator = rawIterator; - keyClass = keyType; - valueClass = valueType; - } - - @Override - public void seekToFirst() { - rawIterator.seekToFirst(); - } - - @Override - public void seekToLast() { - rawIterator.seekToLast(); - } - - @Override - public TypedKeyValue seek(KEY key) throws IOException { - byte[] keyBytes = codecRegistry.asRawData(key); - KeyValue result = rawIterator.seek(keyBytes); - if (result == null) { - return null; - } - return new TypedKeyValue(result); - } - - @Override - public KEY key() throws IOException { - byte[] result = rawIterator.key(); - if (result == null) { - return null; - } - return codecRegistry.asObject(result, keyClass); - } - - @Override - public TypedKeyValue value() { - KeyValue keyValue = rawIterator.value(); - if(keyValue != null) { - return new TypedKeyValue(keyValue, keyClass, valueClass); - } - return null; - } - - @Override - public void close() throws IOException { - rawIterator.close(); - } - - @Override - public boolean hasNext() { - return rawIterator.hasNext(); - } - - @Override - public TypedKeyValue next() { - return new TypedKeyValue(rawIterator.next(), keyType, - valueType); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java deleted file mode 100644 index 7be2921b6a117..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import java.util.Objects; - -/** - * CacheKey for the RocksDB table. 
- * @param - */ -public class CacheKey implements Comparable { - - private final KEY key; - - public CacheKey(KEY key) { - Objects.requireNonNull(key, "Key Should not be null in CacheKey"); - this.key = key; - } - - public KEY getCacheKey() { - return key; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - CacheKey cacheKey = (CacheKey) o; - return Objects.equals(key, cacheKey.key); - } - - @Override - public int hashCode() { - return Objects.hash(key); - } - - @Override - public int compareTo(Object o) { - if(Objects.equals(key, ((CacheKey)o).key)) { - return 0; - } else { - return key.toString().compareTo((((CacheKey) o).key).toString()); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheResult.java deleted file mode 100644 index 8c5a68ba0721b..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheResult.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import java.util.Objects; - -/** - * CacheResult which is returned as response for Key exist in cache or not. - * @param - */ -public class CacheResult { - - private CacheStatus cacheStatus; - private CACHEVALUE cachevalue; - - public CacheResult(CacheStatus cacheStatus, CACHEVALUE cachevalue) { - this.cacheStatus = cacheStatus; - this.cachevalue = cachevalue; - } - - public CacheStatus getCacheStatus() { - return cacheStatus; - } - - public CACHEVALUE getValue() { - return cachevalue; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - CacheResult< ? > that = (CacheResult< ? >) o; - return cacheStatus == that.cacheStatus && - Objects.equals(cachevalue, that.cachevalue); - } - - @Override - public int hashCode() { - return Objects.hash(cacheStatus, cachevalue); - } - - /** - * Status which tells whether key exists in cache or not. - */ - public enum CacheStatus { - EXISTS, // When key exists in cache. - - NOT_EXIST, // We guarantee that it does not exist. This will be returned - // when the key does not exist in cache, when cache clean up policy is - // NEVER. - MAY_EXIST // This will be returned when the key does not exist in - // cache, when cache clean up policy is MANUAL. So caller need to check - // if it might exist in it's rocksdb table. - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java deleted file mode 100644 index de9fe0d95f3d4..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import com.google.common.base.Optional; - -/** - * CacheValue for the RocksDB Table. - * @param - */ -public class CacheValue { - - private Optional value; - // This value is used for evict entries from cache. - // This value is set with ratis transaction context log entry index. 
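The epoch comment above is the key to the cache lifecycle: values and delete markers are tagged with the Ratis log index of the transaction that produced them. A minimal sketch of recording a delete as an absent CacheValue, with illustrative names; as in the previous sketch, the table is assumed to be a TypedTable:

    import com.google.common.base.Optional;
    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
    import org.apache.hadoop.hdds.utils.db.cache.CacheValue;

    final class CacheDeleteMarkerSketch {
      // A delete is recorded in the cache as an absent value tagged with the
      // transaction's log index; lookups then report NOT_EXIST even though the
      // key is still physically present in RocksDB until the batch is flushed.
      static void applyDelete(Table<String, String> table, String key,
          long logIndex) {
        table.addCacheEntry(new CacheKey<>(key),
            new CacheValue<>(Optional.<String>absent(), logIndex));
        // With the MANUAL policy, table.cleanupCache(logIndex) later evicts
        // both values and delete markers whose epoch is <= logIndex.
      }
    }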
- private long epoch; - - public CacheValue(Optional value, long epoch) { - this.value = value; - this.epoch = epoch; - } - - public VALUE getCacheValue() { - return value.orNull(); - } - - public long getEpoch() { - return epoch; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java deleted file mode 100644 index 7235202b9a4ca..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import java.util.Objects; - -/** - * Class used which describes epoch entry. This will be used during deletion - * entries from cache for partial table cache. - * @param - */ -public class EpochEntry implements Comparable { - - private long epoch; - private CACHEKEY cachekey; - - EpochEntry(long epoch, CACHEKEY cachekey) { - this.epoch = epoch; - this.cachekey = cachekey; - } - - public long getEpoch() { - return epoch; - } - - public CACHEKEY getCachekey() { - return cachekey; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - EpochEntry that = (EpochEntry) o; - return epoch == that.epoch && cachekey == that.cachekey; - } - - @Override - public int hashCode() { - return Objects.hash(epoch, cachekey); - } - - public int compareTo(Object o) { - if(this.epoch == ((EpochEntry)o).epoch) { - return 0; - } else if (this.epoch < ((EpochEntry)o).epoch) { - return -1; - } else { - return 1; - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java deleted file mode 100644 index de5a07978f51e..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Evolving; - -import java.util.Iterator; -import java.util.Map; - -/** - * Cache used for RocksDB tables. - * @param - * @param - */ - -@Private -@Evolving -public interface TableCache { - - /** - * Return the value for the key if it is present, otherwise return null. - * @param cacheKey - * @return CACHEVALUE - */ - CACHEVALUE get(CACHEKEY cacheKey); - - /** - * This method should be called for tables with cache cleanup policy - * {@link TableCacheImpl.CacheCleanupPolicy#NEVER} after system restart to - * fill up the cache. - * @param cacheKey - * @param cacheValue - */ - void loadInitial(CACHEKEY cacheKey, CACHEVALUE cacheValue); - - /** - * Add an entry to the cache, if the key already exists it overrides. - * @param cacheKey - * @param value - */ - void put(CACHEKEY cacheKey, CACHEVALUE value); - - /** - * Removes all the entries from the cache which are having epoch value less - * than or equal to specified epoch value. - * - * If clean up policy is NEVER, this is a do nothing operation. - * If clean up policy is MANUAL, it is caller responsibility to cleanup the - * cache before calling cleanup. - * @param epoch - */ - void cleanup(long epoch); - - /** - * Return the size of the cache. - * @return size - */ - int size(); - - /** - * Return an iterator for the cache. - * @return iterator of the underlying cache for the table. - */ - Iterator> iterator(); - - /** - * Check key exist in cache or not. - * - * If it exists return CacheResult with value and status as - * {@link CacheResult.CacheStatus#EXISTS} - * - * If it does not exist: - * If cache clean up policy is - * {@link TableCacheImpl.CacheCleanupPolicy#NEVER} it means table cache is - * full cache. It return's {@link CacheResult} with null - * and status as {@link CacheResult.CacheStatus#NOT_EXIST}. - * - * If cache clean up policy is - * {@link TableCacheImpl.CacheCleanupPolicy#MANUAL} it means - * table cache is partial cache. It return's {@link CacheResult} with - * null and status as MAY_EXIST. - * - * @param cachekey - */ - CacheResult lookup(CACHEKEY cachekey); - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java deleted file mode 100644 index 3e6999a49cfaa..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import java.util.Iterator; -import java.util.Map; -import java.util.NavigableSet; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadFactory; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Evolving; - -/** - * Cache implementation for the table. Depending on the cache clean up policy - * this cache will be full cache or partial cache. - * - * If cache cleanup policy is set as {@link CacheCleanupPolicy#MANUAL}, - * this will be a partial cache. - * - * If cache cleanup policy is set as {@link CacheCleanupPolicy#NEVER}, - * this will be a full cache. - */ -@Private -@Evolving -public class TableCacheImpl implements TableCache { - - private final Map cache; - private final NavigableSet> epochEntries; - private ExecutorService executorService; - private CacheCleanupPolicy cleanupPolicy; - - - - public TableCacheImpl(CacheCleanupPolicy cleanupPolicy) { - - // As for full table cache only we need elements to be inserted in sorted - // manner, so that list will be easy. For other we can go with Hash map. - if (cleanupPolicy == CacheCleanupPolicy.NEVER) { - cache = new ConcurrentSkipListMap<>(); - } else { - cache = new ConcurrentHashMap<>(); - } - epochEntries = new ConcurrentSkipListSet<>(); - // Created a singleThreadExecutor, so one cleanup will be running at a - // time. - ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("PartialTableCache Cleanup Thread - %d").build(); - executorService = Executors.newSingleThreadExecutor(build); - this.cleanupPolicy = cleanupPolicy; - } - - @Override - public CACHEVALUE get(CACHEKEY cachekey) { - return cache.get(cachekey); - } - - @Override - public void loadInitial(CACHEKEY cacheKey, CACHEVALUE cacheValue) { - // No need to add entry to epochEntries. Adding to cache is required during - // normal put operation. 
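The practical difference between the two cleanup policies shows up in lookup: a miss in a MANUAL (partial) cache only means "not cached", while a miss in a NEVER (full) cache is authoritative. A minimal sketch, with generic parameters restored as in the original class and illustrative key/value types:

    import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
    import org.apache.hadoop.hdds.utils.db.cache.CacheResult;
    import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
    import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl;
    import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl.CacheCleanupPolicy;

    final class CachePolicySketch {
      static void demo() {
        // Partial cache: misses mean "not cached", the table must be read.
        TableCacheImpl<CacheKey<String>, CacheValue<Long>> partial =
            new TableCacheImpl<>(CacheCleanupPolicy.MANUAL);
        // Full cache: a miss is authoritative, the key does not exist at all.
        TableCacheImpl<CacheKey<String>, CacheValue<Long>> full =
            new TableCacheImpl<>(CacheCleanupPolicy.NEVER);

        CacheKey<String> key = new CacheKey<>("volume1");
        CacheResult<CacheValue<Long>> r1 = partial.lookup(key); // MAY_EXIST
        CacheResult<CacheValue<Long>> r2 = full.lookup(key);    // NOT_EXIST
        System.out.println(r1.getCacheStatus() + " vs " + r2.getCacheStatus());
      }
    }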
- cache.put(cacheKey, cacheValue); - } - - @Override - public void put(CACHEKEY cacheKey, CACHEVALUE value) { - cache.put(cacheKey, value); - epochEntries.add(new EpochEntry<>(value.getEpoch(), cacheKey)); - } - - @Override - public void cleanup(long epoch) { - executorService.submit(() -> evictCache(epoch, cleanupPolicy)); - } - - @Override - public int size() { - return cache.size(); - } - - @Override - public Iterator> iterator() { - return cache.entrySet().iterator(); - } - - private void evictCache(long epoch, CacheCleanupPolicy cacheCleanupPolicy) { - EpochEntry currentEntry = null; - for (Iterator> iterator = epochEntries.iterator(); - iterator.hasNext();) { - currentEntry = iterator.next(); - CACHEKEY cachekey = currentEntry.getCachekey(); - CacheValue cacheValue = cache.computeIfPresent(cachekey, ((k, v) -> { - if (cleanupPolicy == CacheCleanupPolicy.MANUAL) { - if (v.getEpoch() <= epoch) { - iterator.remove(); - return null; - } - } else if (cleanupPolicy == CacheCleanupPolicy.NEVER) { - // Remove only entries which are marked for delete. - if (v.getEpoch() <= epoch && v.getCacheValue() == null) { - iterator.remove(); - return null; - } - } - return v; - })); - // If currentEntry epoch is greater than epoch, we have deleted all - // entries less than specified epoch. So, we can break. - if (cacheValue != null && cacheValue.getEpoch() >= epoch) { - break; - } - } - } - - public CacheResult lookup(CACHEKEY cachekey) { - - CACHEVALUE cachevalue = cache.get(cachekey); - if (cachevalue == null) { - if (cleanupPolicy == CacheCleanupPolicy.NEVER) { - return new CacheResult<>(CacheResult.CacheStatus.NOT_EXIST, null); - } else { - return new CacheResult<>(CacheResult.CacheStatus.MAY_EXIST, - null); - } - } else { - if (cachevalue.getCacheValue() != null) { - return new CacheResult<>(CacheResult.CacheStatus.EXISTS, cachevalue); - } else { - // When entity is marked for delete, cacheValue will be set to null. - // In that case we can return NOT_EXIST irrespective of cache cleanup - // policy. - return new CacheResult<>(CacheResult.CacheStatus.NOT_EXIST, null); - } - } - } - - /** - * Cleanup policies for table cache. - */ - public enum CacheCleanupPolicy { - NEVER, // Cache will not be cleaned up. This mean's the table maintains - // full cache. - MANUAL // Cache will be cleaned up, once after flushing to DB. It is - // caller's responsibility to flush to DB, before calling cleanup cache. - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java deleted file mode 100644 index eb9c5b9da8f32..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils.db.cache; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java deleted file mode 100644 index 8b56bffa777d8..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Database interfaces for Ozone. - */ -package org.apache.hadoop.hdds.utils.db; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/package-info.java deleted file mode 100644 index 4576dc82a8ea2..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java deleted file mode 100644 index 3f7d0b915d5d5..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ /dev/null @@ -1,464 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; - -import org.apache.ratis.proto.RaftProtos.ReplicationLevel; -import org.apache.ratis.util.TimeDuration; - -import java.util.concurrent.TimeUnit; - -/** - * This class contains constants for configuration keys used in Ozone. - */ -@InterfaceAudience.Public -@InterfaceStability.Unstable -public final class OzoneConfigKeys { - public static final String OZONE_TAGS_SYSTEM_KEY = - "ozone.tags.system"; - public static final String DFS_CONTAINER_IPC_PORT = - "dfs.container.ipc"; - public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859; - - public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; - - /** - * - * When set to true, allocate a random free port for ozone container, - * so that a mini cluster is able to launch multiple containers on a node. - * - * When set to false (default), container port is fixed as specified by - * DFS_CONTAINER_IPC_PORT_DEFAULT. - */ - public static final String DFS_CONTAINER_IPC_RANDOM_PORT = - "dfs.container.ipc.random.port"; - public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = - false; - - public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY = - "dfs.container.chunk.write.sync"; - public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; - /** - * Ratis Port where containers listen to. - */ - public static final String DFS_CONTAINER_RATIS_IPC_PORT = - "dfs.container.ratis.ipc"; - public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; - - /** - * When set to true, allocate a random free port for ozone container, so that - * a mini cluster is able to launch multiple containers on a node. 
- */ - public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT = - "dfs.container.ratis.ipc.random.port"; - public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = - false; - public static final String OZONE_ENABLED = - "ozone.enabled"; - public static final boolean OZONE_ENABLED_DEFAULT = false; - public static final String OZONE_TRACE_ENABLED_KEY = - "ozone.trace.enabled"; - public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false; - - public static final String OZONE_METADATA_STORE_IMPL = - "ozone.metastore.impl"; - public static final String OZONE_METADATA_STORE_IMPL_LEVELDB = - "LevelDB"; - public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB = - "RocksDB"; - public static final String OZONE_METADATA_STORE_IMPL_DEFAULT = - OZONE_METADATA_STORE_IMPL_ROCKSDB; - - public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS = - "ozone.metastore.rocksdb.statistics"; - - public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT = - "OFF"; - public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF = - "OFF"; - - public static final String OZONE_UNSAFEBYTEOPERATIONS_ENABLED = - "ozone.UnsafeByteOperations.enabled"; - public static final boolean OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT - = true; - - public static final String OZONE_CONTAINER_CACHE_SIZE = - "ozone.container.cache.size"; - public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024; - - public static final String OZONE_SCM_BLOCK_SIZE = - "ozone.scm.block.size"; - public static final String OZONE_SCM_BLOCK_SIZE_DEFAULT = "256MB"; - - /** - * Ozone administrator users delimited by comma. - * If not set, only the user who launches an ozone service will be the - * admin user. This property must be set if ozone services are started by - * different users. Otherwise the RPC layer will reject calls from - * other servers which are started by users not in the list. - * */ - public static final String OZONE_ADMINISTRATORS = - "ozone.administrators"; - /** - * Used only for testing purpose. Results in making every user an admin. - * */ - public static final String OZONE_ADMINISTRATORS_WILDCARD = "*"; - - public static final String OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE = - "ozone.client.stream.buffer.flush.size"; - - public static final String OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE_DEFAULT = - "64MB"; - - public static final String OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE = - "ozone.client.stream.buffer.max.size"; - - public static final String OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE_DEFAULT = - "128MB"; - - public static final String OZONE_CLIENT_WATCH_REQUEST_TIMEOUT = - "ozone.client.watch.request.timeout"; - - public static final String OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT = - "30s"; - - public static final String OZONE_CLIENT_MAX_RETRIES = - "ozone.client.max.retries"; - public static final int OZONE_CLIENT_MAX_RETRIES_DEFAULT = 100; - public static final String OZONE_CLIENT_RETRY_INTERVAL = - "ozone.client.retry.interval"; - public static final TimeDuration OZONE_CLIENT_RETRY_INTERVAL_DEFAULT = - TimeDuration.valueOf(0, TimeUnit.MILLISECONDS); - - // This defines the overall connection limit for the connection pool used in - // RestClient. - public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_MAX = - "ozone.rest.client.http.connection.max"; - public static final int OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT = 100; - - // This defines the connection limit per one HTTP route/host. 
- public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX = - "ozone.rest.client.http.connection.per-route.max"; - - public static final int - OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20; - - public static final String OZONE_CLIENT_SOCKET_TIMEOUT = - "ozone.client.socket.timeout"; - public static final int OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT = 5000; - public static final String OZONE_CLIENT_CONNECTION_TIMEOUT = - "ozone.client.connection.timeout"; - public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT = 5000; - - public static final String OZONE_REPLICATION = "ozone.replication"; - public static final int OZONE_REPLICATION_DEFAULT = - ReplicationFactor.THREE.getValue(); - - public static final String OZONE_REPLICATION_TYPE = "ozone.replication.type"; - public static final String OZONE_REPLICATION_TYPE_DEFAULT = - ReplicationType.RATIS.toString(); - - /** - * Configuration property to configure the cache size of client list calls. - */ - public static final String OZONE_CLIENT_LIST_CACHE_SIZE = - "ozone.client.list.cache"; - public static final int OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT = 1000; - - /** - * Configuration properties for Ozone Block Deleting Service. - */ - public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL = - "ozone.block.deleting.service.interval"; - public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT - = "60s"; - - /** - * The interval of open key clean service. - */ - public static final String OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS = - "ozone.open.key.cleanup.service.interval.seconds"; - public static final int - OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT - = 24 * 3600; // a total of 24 hour - - /** - * An open key gets cleaned up when it is being in open state for too long. 
- */ - public static final String OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS = - "ozone.open.key.expire.threshold"; - public static final int OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT = - 24 * 3600; - - public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT = - "ozone.block.deleting.service.timeout"; - public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT - = "300s"; // 300s for default - - public static final String OZONE_KEY_PREALLOCATION_BLOCKS_MAX = - "ozone.key.preallocation.max.blocks"; - public static final int OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT - = 64; - - public static final String OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER = - "ozone.block.deleting.limit.per.task"; - public static final int OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT - = 1000; - - public static final String OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL - = "ozone.block.deleting.container.limit.per.interval"; - public static final int - OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10; - - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; - public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; - public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; - - // config settings to enable stateMachineData write timeout - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; - public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; - - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL = - ScmConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL; - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT = - ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT; - - public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = - "dfs.container.ratis.datanode.storage.dir"; - public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY; - public static final TimeDuration - DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT; - public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY; - public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT; - public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY; - public static final TimeDuration - DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; - public static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; - public static final int - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; - public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; - public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; - public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP; - public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS; - public static final int - DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT = - 
ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT; - public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY; - public static final TimeDuration - DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT; - public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; - public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; - - public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY; - public static final TimeDuration - DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT; - - public static final String HDDS_DATANODE_PLUGINS_KEY = - "hdds.datanode.plugins"; - - public static final String - HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD = - "hdds.datanode.storage.utilization.warning.threshold"; - public static final double - HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT = 0.75; - public static final String - HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD = - "hdds.datanode.storage.utilization.critical.threshold"; - public static final double - HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.95; - - public static final String OZONE_SECURITY_ENABLED_KEY = - "ozone.security.enabled"; - public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false; - - public static final String OZONE_CONTAINER_COPY_WORKDIR = - "hdds.datanode.replication.work.dir"; - - /** - * Config properties to set client side checksum properties. - */ - public static final String OZONE_CLIENT_CHECKSUM_TYPE = - "ozone.client.checksum.type"; - public static final String OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT = "CRC32"; - public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM = - "ozone.client.bytes.per.checksum"; - public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT = "1MB"; - public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES = - 1024 * 1024; - public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE = 256 * 1024; - public static final String OZONE_CLIENT_VERIFY_CHECKSUM = - "ozone.client.verify.checksum"; - public static final boolean OZONE_CLIENT_VERIFY_CHECKSUM_DEFAULT = true; - public static final String OZONE_ACL_AUTHORIZER_CLASS = - "ozone.acl.authorizer.class"; - public static final String OZONE_ACL_AUTHORIZER_CLASS_DEFAULT = - "org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer"; - public static final String OZONE_ACL_AUTHORIZER_CLASS_NATIVE = - "org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer"; - public static final String OZONE_ACL_ENABLED = - "ozone.acl.enabled"; - public static final boolean OZONE_ACL_ENABLED_DEFAULT = - false; - public static final String OZONE_S3_TOKEN_MAX_LIFETIME_KEY = - "ozone.s3.token.max.lifetime"; - public static final String OZONE_S3_TOKEN_MAX_LIFETIME_KEY_DEFAULT = "3m"; - //For technical reasons this is unused and hardcoded to the - // OzoneFileSystem.initialize. 
- public static final String OZONE_FS_ISOLATED_CLASSLOADER = - "ozone.fs.isolated-classloader"; - - // Ozone Client Retry and Failover configurations - public static final String OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY = - "ozone.client.retry.max.attempts"; - public static final int OZONE_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = - 10; - public static final String OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = - "ozone.client.failover.max.attempts"; - public static final int OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = - 15; - public static final String OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY = - "ozone.client.failover.sleep.base.millis"; - public static final int OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT = - 500; - public static final String OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY = - "ozone.client.failover.sleep.max.millis"; - public static final int OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT = - 15000; - - public static final String OZONE_FREON_HTTP_ENABLED_KEY = - "ozone.freon.http.enabled"; - public static final String OZONE_FREON_HTTP_BIND_HOST_KEY = - "ozone.freon.http-bind-host"; - public static final String OZONE_FREON_HTTPS_BIND_HOST_KEY = - "ozone.freon.https-bind-host"; - public static final String OZONE_FREON_HTTP_ADDRESS_KEY = - "ozone.freon.http-address"; - public static final String OZONE_FREON_HTTPS_ADDRESS_KEY = - "ozone.freon.https-address"; - - public static final String OZONE_FREON_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int OZONE_FREON_HTTP_BIND_PORT_DEFAULT = 9884; - public static final int OZONE_FREON_HTTPS_BIND_PORT_DEFAULT = 9885; - public static final String - OZONE_FREON_HTTP_KERBEROS_PRINCIPAL_KEY = - "ozone.freon.http.kerberos.principal"; - public static final String - OZONE_FREON_HTTP_KERBEROS_KEYTAB_FILE_KEY = - "ozone.freon.http.kerberos.keytab"; - - public static final String OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY = - "ozone.network.topology.aware.read"; - public static final boolean OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT = false; - - public static final String OZONE_MANAGER_FAIR_LOCK = "ozone.om.lock.fair"; - public static final boolean OZONE_MANAGER_FAIR_LOCK_DEFAULT = false; - - /** - * There is no need to instantiate this class. - */ - private OzoneConfigKeys() { - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java deleted file mode 100644 index 9817d877eb55a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ /dev/null @@ -1,327 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.thirdparty.io.grpc.Context; -import org.apache.ratis.thirdparty.io.grpc.Metadata; - -import static org.apache.ratis.thirdparty.io.grpc.Metadata.ASCII_STRING_MARSHALLER; - -/** - * Set of constants used in Ozone implementation. - */ -@InterfaceAudience.Private -public final class OzoneConsts { - - - public static final String STORAGE_DIR = "scm"; - public static final String SCM_ID = "scmUuid"; - - public static final String OZONE_SIMPLE_ROOT_USER = "root"; - public static final String OZONE_SIMPLE_HDFS_USER = "hdfs"; - - public static final String STORAGE_ID = "storageID"; - public static final String DATANODE_UUID = "datanodeUuid"; - public static final String CLUSTER_ID = "clusterID"; - public static final String LAYOUTVERSION = "layOutVersion"; - public static final String CTIME = "ctime"; - /* - * BucketName length is used for both buckets and volume lengths - */ - public static final int OZONE_MIN_BUCKET_NAME_LENGTH = 3; - public static final int OZONE_MAX_BUCKET_NAME_LENGTH = 63; - - public static final String OZONE_ACL_USER_TYPE = "user"; - public static final String OZONE_ACL_GROUP_TYPE = "group"; - public static final String OZONE_ACL_WORLD_TYPE = "world"; - public static final String OZONE_ACL_ANONYMOUS_TYPE = "anonymous"; - public static final String OZONE_ACL_IP_TYPE = "ip"; - - public static final String OZONE_ACL_READ = "r"; - public static final String OZONE_ACL_WRITE = "w"; - public static final String OZONE_ACL_DELETE = "d"; - public static final String OZONE_ACL_LIST = "l"; - public static final String OZONE_ACL_ALL = "a"; - public static final String OZONE_ACL_NONE = "n"; - public static final String OZONE_ACL_CREATE = "c"; - public static final String OZONE_ACL_READ_ACL = "x"; - public static final String OZONE_ACL_WRITE_ACL = "y"; - - - public static final String OZONE_DATE_FORMAT = - "EEE, dd MMM yyyy HH:mm:ss zzz"; - public static final String OZONE_TIME_ZONE = "GMT"; - - public static final String OZONE_COMPONENT = "component"; - public static final String OZONE_FUNCTION = "function"; - public static final String OZONE_RESOURCE = "resource"; - public static final String OZONE_USER = "user"; - public static final String OZONE_REQUEST = "request"; - - // OM Http server endpoints - public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT = - "/serviceList"; - public static final String OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT = - "/dbCheckpoint"; - - // Ozone File System scheme - public static final String OZONE_URI_SCHEME = "o3fs"; - - public static final String OZONE_RPC_SCHEME = "o3"; - public static final String OZONE_HTTP_SCHEME = "http"; - public static final String OZONE_URI_DELIMITER = "/"; - - public static final String CONTAINER_EXTENSION = ".container"; - public static final String CONTAINER_META = ".meta"; - - // Refer to {@link ContainerReader} for container storage layout on disk. 
- public static final String CONTAINER_PREFIX = "containers"; - public static final String CONTAINER_META_PATH = "metadata"; - public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp"; - public static final String CONTAINER_CHUNK_NAME_DELIMITER = "."; - public static final String CONTAINER_ROOT_PREFIX = "repository"; - - public static final String FILE_HASH = "SHA-256"; - public static final String MD5_HASH = "MD5"; - public final static String CHUNK_OVERWRITE = "OverWriteRequested"; - - public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1 MB - public static final long KB = 1024L; - public static final long MB = KB * 1024L; - public static final long GB = MB * 1024L; - public static final long TB = GB * 1024L; - - /** - * level DB names used by SCM and data nodes. - */ - public static final String CONTAINER_DB_SUFFIX = "container.db"; - public static final String PIPELINE_DB_SUFFIX = "pipeline.db"; - public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX; - public static final String SCM_PIPELINE_DB = "scm-" + PIPELINE_DB_SUFFIX; - public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX; - public static final String DELETED_BLOCK_DB = "deletedBlock.db"; - public static final String OM_DB_NAME = "om.db"; - public static final String OM_DB_BACKUP_PREFIX = "om.db.backup."; - public static final String OM_DB_CHECKPOINTS_DIR_NAME = "om.db.checkpoints"; - public static final String OZONE_MANAGER_TOKEN_DB_NAME = "om-token.db"; - public static final String SCM_DB_NAME = "scm.db"; - - public static final String STORAGE_DIR_CHUNKS = "chunks"; - public static final String OZONE_DB_CHECKPOINT_REQUEST_FLUSH = - "flushBeforeCheckpoint"; - - /** - * Supports Bucket Versioning. - */ - public enum Versioning { - NOT_DEFINED, ENABLED, DISABLED; - - public static Versioning getVersioning(boolean versioning) { - return versioning ? ENABLED : DISABLED; - } - } - - public static final String DELETING_KEY_PREFIX = "#deleting#"; - public static final String DELETED_KEY_PREFIX = "#deleted#"; - public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#"; - public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID"; - - /** - * OM LevelDB prefixes. - * - * OM DB stores metadata as KV pairs with certain prefixes, - * prefix is used to improve the performance to get related - * metadata. - * - * OM DB Schema: - * ---------------------------------------------------------- - * | KEY | VALUE | - * ---------------------------------------------------------- - * | $userName | VolumeList | - * ---------------------------------------------------------- - * | /#volumeName | VolumeInfo | - * ---------------------------------------------------------- - * | /#volumeName/#bucketName | BucketInfo | - * ---------------------------------------------------------- - * | /volumeName/bucketName/keyName | KeyInfo | - * ---------------------------------------------------------- - * | #deleting#/volumeName/bucketName/keyName | KeyInfo | - * ---------------------------------------------------------- - */ - - public static final String OM_KEY_PREFIX = "/"; - public static final String OM_USER_PREFIX = "$"; - public static final String OM_S3_PREFIX ="S3:"; - public static final String OM_S3_VOLUME_PREFIX = "s3"; - public static final String OM_S3_SECRET = "S3Secret:"; - public static final String OM_PREFIX = "Prefix:"; - - /** - * Max chunk size limit. - */ - public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024; - - - /** - * Max OM Quota size of 1024 PB. 
- */ - public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB; - - /** - * Max number of keys returned per list buckets operation. - */ - public static final int MAX_LISTBUCKETS_SIZE = 1024; - - /** - * Max number of keys returned per list keys operation. - */ - public static final int MAX_LISTKEYS_SIZE = 1024; - - /** - * Max number of volumes returned per list volumes operation. - */ - public static final int MAX_LISTVOLUMES_SIZE = 1024; - - public static final int INVALID_PORT = -1; - - - /** - * Default SCM Datanode ID file name. - */ - public static final String OZONE_SCM_DATANODE_ID_FILE_DEFAULT = "datanode.id"; - - // The ServiceListJSONServlet context attribute where OzoneManager - // instance gets stored. - public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om"; - - private OzoneConsts() { - // Never Constructed - } - - // YAML fields for .container files - public static final String CONTAINER_ID = "containerID"; - public static final String CONTAINER_TYPE = "containerType"; - public static final String STATE = "state"; - public static final String METADATA = "metadata"; - public static final String MAX_SIZE = "maxSize"; - public static final String METADATA_PATH = "metadataPath"; - public static final String CHUNKS_PATH = "chunksPath"; - public static final String CONTAINER_DB_TYPE = "containerDBType"; - public static final String CHECKSUM = "checksum"; - public static final String ORIGIN_PIPELINE_ID = "originPipelineId"; - public static final String ORIGIN_NODE_ID = "originNodeId"; - - // Supported store types. - public static final String OZONE = "ozone"; - public static final String S3 = "s3"; - - // For OM Audit usage - public static final String VOLUME = "volume"; - public static final String BUCKET = "bucket"; - public static final String KEY = "key"; - public static final String QUOTA = "quota"; - public static final String QUOTA_IN_BYTES = "quotaInBytes"; - public static final String OBJECT_ID = "objectID"; - public static final String UPDATE_ID = "updateID"; - public static final String CLIENT_ID = "clientID"; - public static final String OWNER = "owner"; - public static final String ADMIN = "admin"; - public static final String USERNAME = "username"; - public static final String PREV_KEY = "prevKey"; - public static final String START_KEY = "startKey"; - public static final String MAX_KEYS = "maxKeys"; - public static final String PREFIX = "prefix"; - public static final String KEY_PREFIX = "keyPrefix"; - public static final String ACL = "acl"; - public static final String ACLS = "acls"; - public static final String USER_ACL = "userAcl"; - public static final String ADD_ACLS = "addAcls"; - public static final String REMOVE_ACLS = "removeAcls"; - public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets"; - public static final String TO_KEY_NAME = "toKeyName"; - public static final String STORAGE_TYPE = "storageType"; - public static final String RESOURCE_TYPE = "resourceType"; - public static final String IS_VERSION_ENABLED = "isVersionEnabled"; - public static final String CREATION_TIME = "creationTime"; - public static final String DATA_SIZE = "dataSize"; - public static final String REPLICATION_TYPE = "replicationType"; - public static final String REPLICATION_FACTOR = "replicationFactor"; - public static final String KEY_LOCATION_INFO = "keyLocationInfo"; - public static final String MULTIPART_LIST = "multipartList"; - public static final String UPLOAD_ID = "uploadID"; - public static final String PART_NUMBER_MARKER = "partNumberMarker"; - 
public static final String MAX_PARTS = "maxParts"; - public static final String S3_BUCKET = "s3Bucket"; - public static final String S3_GETSECRET_USER = "S3GetSecretUser"; - - - - // For OM metrics saving to a file - public static final String OM_METRICS_FILE = "omMetrics"; - public static final String OM_METRICS_TEMP_FILE = OM_METRICS_FILE + ".tmp"; - - // For Multipart upload - public static final int OM_MULTIPART_MIN_SIZE = 5 * 1024 * 1024; - - // GRPC block token metadata header and context key - public static final String OZONE_BLOCK_TOKEN = "blocktoken"; - public static final Context.Key<UserGroupInformation> UGI_CTX_KEY = - Context.key("UGI"); - - public static final Metadata.Key<String> OBT_METADATA_KEY = - Metadata.Key.of(OZONE_BLOCK_TOKEN, ASCII_STRING_MARSHALLER); - public static final Metadata.Key<String> USER_METADATA_KEY = - Metadata.Key.of(OZONE_USER, ASCII_STRING_MARSHALLER); - - public static final String RPC_PORT = "RPC"; - - // Default OMServiceID for OM Ratis servers to use as RaftGroupId - public static final String OM_SERVICE_ID_DEFAULT = "omServiceIdDefault"; - - // Dummy OMNodeID for OM Clients to use for a non-HA OM setup - public static final String OM_NODE_ID_DUMMY = "omNodeIdDummy"; - - // OM Ratis snapshot file to store the last applied index - public static final String OM_RATIS_SNAPSHOT_INDEX = "ratisSnapshotIndex"; - - // OM Http request parameter to be used while downloading DB checkpoint - // from OM leader to follower - public static final String OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT = - "snapshotBeforeCheckpoint"; - - public static final String JAVA_TMP_DIR = "java.io.tmpdir"; - public static final String LOCALHOST = "localhost"; - - - public static final int S3_BUCKET_MIN_LENGTH = 3; - public static final int S3_BUCKET_MAX_LENGTH = 64; - - //GDPR - public static final String GDPR_FLAG = "gdprEnabled"; - public static final String GDPR_ALGORITHM_NAME = "AES"; - public static final int GDPR_DEFAULT_RANDOM_SECRET_LENGTH = 16; - public static final String GDPR_CHARSET = "UTF-8"; - public static final String GDPR_LENGTH = "length"; - public static final String GDPR_SECRET = "secret"; - public static final String GDPR_ALGORITHM = "algorithm"; - - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java deleted file mode 100644 index c1fb893806177..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; - -import org.apache.commons.validator.routines.InetAddressValidator; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.NetworkInterface; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Enumeration; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Ozone security Util class. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class OzoneSecurityUtil { - - private final static Logger LOG = - LoggerFactory.getLogger(OzoneSecurityUtil.class); - // List of ip's not recommended to be added to CSR. - private final static Set<String> INVALID_IPS = new HashSet<>(Arrays.asList( - "0.0.0.0", "127.0.0.1")); - - private OzoneSecurityUtil() { - } - - public static boolean isSecurityEnabled(Configuration conf) { - return conf.getBoolean(OZONE_SECURITY_ENABLED_KEY, - OZONE_SECURITY_ENABLED_DEFAULT); - } - - /** - * Returns Keys status. - * - * @return True if the key files exist. - */ - public static boolean checkIfFileExist(Path path, String fileName) { - if (Files.exists(path) && Files.exists(Paths.get(path.toString(), - fileName))) { - return true; - } - return false; - } - - /** - * Iterates through network interfaces and return all valid ip's not - * listed in CertificateSignRequest#INVALID_IPS. - * - * @return List<InetAddress> - * @throws IOException if no network interface are found or if an error - * occurs. - */ - public static List<InetAddress> getValidInetsForCurrentHost() - throws IOException { - List<InetAddress> hostIps = new ArrayList<>(); - InetAddressValidator ipValidator = InetAddressValidator.getInstance(); - - Enumeration<NetworkInterface> enumNI = - NetworkInterface.getNetworkInterfaces(); - if (enumNI != null) { - while (enumNI.hasMoreElements()) { - NetworkInterface ifc = enumNI.nextElement(); - if (ifc.isUp()) { - Enumeration<InetAddress> enumAdds = ifc.getInetAddresses(); - while (enumAdds.hasMoreElements()) { - InetAddress addr = enumAdds.nextElement(); - - if (ipValidator.isValid(addr.getHostAddress()) - && !INVALID_IPS.contains(addr.getHostAddress())) { - LOG.info("Adding ip:{},host:{}", addr.getHostAddress(), - addr.getHostName()); - hostIps.add(addr); - } else { - LOG.info("ip:{},host:{} not returned.", addr.getHostAddress(), - addr.getHostName()); - } - } - } - } - return hostIps; - } else { - throw new IOException("Unable to get network interfaces."); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java deleted file mode 100644 index 8c1d6f0c67ddf..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -/** - * Interface to define AuditAction. - */ -public interface AuditAction { - /** - * Implementation must override. - * @return String - */ - String getAction(); -} - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java deleted file mode 100644 index 098ab6b2f7f0d..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.audit; - -/** - * Enum to define AuditEventStatus values. - */ -public enum AuditEventStatus { - SUCCESS("SUCCESS"), - FAILURE("FAILURE"); - - private String status; - - AuditEventStatus(String status){ - this.status = status; - } - - public String getStatus() { - return status; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java deleted file mode 100644 index ee6f45dadb4c4..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Marker; -import org.apache.logging.log4j.spi.ExtendedLogger; - - -/** - * Class to define Audit Logger for Ozone. - */ -public class AuditLogger { - - private ExtendedLogger logger; - private static final String FQCN = AuditLogger.class.getName(); - private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker(); - private static final Marker READ_MARKER = AuditMarker.READ.getMarker(); - - /** - * Parametrized Constructor to initialize logger. - * @param type Audit Logger Type - */ - public AuditLogger(AuditLoggerType type){ - initializeLogger(type); - } - - /** - * Initializes the logger with specific type. - * @param loggerType specified one of the values from enum AuditLoggerType. - */ - private void initializeLogger(AuditLoggerType loggerType){ - this.logger = LogManager.getContext(false).getLogger(loggerType.getType()); - } - - @VisibleForTesting - public ExtendedLogger getLogger() { - return logger; - } - - public void logWriteSuccess(AuditMessage msg) { - this.logger.logIfEnabled(FQCN, Level.INFO, WRITE_MARKER, msg, null); - } - - public void logWriteFailure(AuditMessage msg) { - this.logger.logIfEnabled(FQCN, Level.ERROR, WRITE_MARKER, msg, - msg.getThrowable()); - } - - public void logReadSuccess(AuditMessage msg) { - this.logger.logIfEnabled(FQCN, Level.INFO, READ_MARKER, msg, null); - } - - public void logReadFailure(AuditMessage msg) { - this.logger.logIfEnabled(FQCN, Level.ERROR, READ_MARKER, msg, - msg.getThrowable()); - } - - public void logWrite(AuditMessage auditMessage) { - if (auditMessage.getThrowable() == null) { - this.logger.logIfEnabled(FQCN, Level.INFO, WRITE_MARKER, auditMessage, - auditMessage.getThrowable()); - } else { - this.logger.logIfEnabled(FQCN, Level.ERROR, WRITE_MARKER, auditMessage, - auditMessage.getThrowable()); - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java deleted file mode 100644 index 18241c7712a52..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -/** - * Enumeration for defining types of Audit Loggers in Ozone. - */ -public enum AuditLoggerType { - DNLOGGER("DNAudit"), - OMLOGGER("OMAudit"), - SCMLOGGER("SCMAudit"); - - private String type; - - public String getType() { - return type; - } - - AuditLoggerType(String type){ - this.type = type; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java deleted file mode 100644 index 505b958071590..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; - -import org.apache.logging.log4j.Marker; -import org.apache.logging.log4j.MarkerManager; - -/** - * Defines audit marker types. - */ -public enum AuditMarker { - WRITE(MarkerManager.getMarker("WRITE")), - READ(MarkerManager.getMarker("READ")); - - private Marker marker; - - AuditMarker(Marker marker){ - this.marker = marker; - } - - public Marker getMarker(){ - return marker; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java deleted file mode 100644 index 1569ffe3ba797..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java +++ /dev/null @@ -1,131 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; - -import org.apache.logging.log4j.message.Message; - -import java.util.Map; - -/** - * Defines audit message structure. - */ -public class AuditMessage implements Message { - - private String message; - private Throwable throwable; - - private static final String MSG_PATTERN = - "user=%s | ip=%s | op=%s %s | ret=%s"; - - public AuditMessage(){ - - } - - @Override - public String getFormattedMessage() { - return message; - } - - @Override - public String getFormat() { - return null; - } - - @Override - public Object[] getParameters() { - return new Object[0]; - } - - @Override - public Throwable getThrowable() { - return throwable; - } - - /** - * Use when there are custom string to be added to default msg. - * @param customMessage custom string - */ - private void appendMessage(String customMessage) { - this.message += customMessage; - } - - public String getMessage() { - return message; - } - - public void setMessage(String message) { - this.message = message; - } - - public void setThrowable(Throwable throwable) { - this.throwable = throwable; - } - - /** - * Builder class for AuditMessage. - */ - public static class Builder { - private Throwable throwable; - private String user; - private String ip; - private String op; - private Map params; - private String ret; - - public Builder(){ - - } - - public Builder setUser(String usr){ - this.user = usr; - return this; - } - - public Builder atIp(String ipAddr){ - this.ip = ipAddr; - return this; - } - - public Builder forOperation(String operation){ - this.op = operation; - return this; - } - - public Builder withParams(Map args){ - this.params = args; - return this; - } - - public Builder withResult(String result){ - this.ret = result; - return this; - } - - public Builder withException(Throwable ex){ - this.throwable = ex; - return this; - } - - public AuditMessage build(){ - AuditMessage auditMessage = new AuditMessage(); - auditMessage.message = String.format(MSG_PATTERN, - this.user, this.ip, this.op, this.params, this.ret); - auditMessage.throwable = this.throwable; - return auditMessage; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java deleted file mode 100644 index 9d7dbee35b007..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -import java.util.Map; - -/** - * Interface to make an entity auditable. - */ -public interface Auditable { - /** - * Must override in implementation. - * @return {@literal Map} with values to be logged in audit. - */ - Map toAuditMap(); -} - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java deleted file mode 100644 index 51c029868bfc2..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -import java.util.Map; - -/** - * Interface to mark an actor as Auditor. - */ -public interface Auditor { - - AuditMessage buildAuditMessageForSuccess( - AuditAction op, Map auditMap); - - AuditMessage buildAuditMessageForFailure( - AuditAction op, Map auditMap, Throwable throwable); - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java deleted file mode 100644 index 1c87f2bdebad4..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; - -/** - * Enum to define Audit Action types for Datanode. - */ -public enum DNAction implements AuditAction { - - CREATE_CONTAINER, - READ_CONTAINER, - UPDATE_CONTAINER, - DELETE_CONTAINER, - LIST_CONTAINER, - PUT_BLOCK, - GET_BLOCK, - DELETE_BLOCK, - LIST_BLOCK, - READ_CHUNK, - DELETE_CHUNK, - WRITE_CHUNK, - LIST_CHUNK, - COMPACT_CHUNK, - PUT_SMALL_FILE, - GET_SMALL_FILE, - CLOSE_CONTAINER, - GET_COMMITTED_BLOCK_LENGTH; - - @Override - public String getAction() { - return this.toString(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java deleted file mode 100644 index d03ad157220af..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; - -/** - * Enum to define Audit Action types for SCM. - */ -public enum SCMAction implements AuditAction { - - GET_VERSION, - REGISTER, - SEND_HEARTBEAT, - GET_SCM_INFO, - ALLOCATE_BLOCK, - DELETE_KEY_BLOCK, - ALLOCATE_CONTAINER, - GET_CONTAINER, - GET_CONTAINER_WITH_PIPELINE, - LIST_CONTAINER, - LIST_PIPELINE, - CLOSE_PIPELINE, - ACTIVATE_PIPELINE, - DEACTIVATE_PIPELINE, - DELETE_CONTAINER, - IN_SAFE_MODE, - FORCE_EXIT_SAFE_MODE, - SORT_DATANODE, - START_REPLICATION_MANAGER, - STOP_REPLICATION_MANAGER, - GET_REPLICATION_MANAGER_STATUS; - - @Override - public String getAction() { - return this.toString(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java deleted file mode 100644 index c8284fd8ff3cc..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; -/** - ****************************************************************************** - * Important - * 1. Any changes to classes in this package can render the logging - * framework broken. - * 2. The logger framework has been designed keeping in mind future - * plans to build a log parser. - * 3. Please exercise great caution when attempting changes in this package. - ****************************************************************************** - * - * - * This package lays the foundation for Audit logging in Ozone. - * AuditLogging in Ozone has been built using log4j2 which brings in new - * features that facilitate turning on/off selective audit events by using - * MarkerFilter, checking for change in logging configuration periodically - * and reloading the changes, use of disruptor framework for improved - * Asynchronous logging. - * - * The log4j2 configurations can be specified in XML, YAML, JSON and - * Properties file. For Ozone, we are using the Properties file due to sheer - * simplicity, readability and ease of modification. - * - * log4j2 configuration file can be passed to startup command with option - * -Dlog4j.configurationFile unlike -Dlog4j.configuration in log4j 1.x - * - ****************************************************************************** - * Understanding the Audit Logging framework in Ozone. - ****************************************************************************** - * **** Auditable *** - * This is an interface to mark an entity as auditable. - * This interface must be implemented by entities requiring audit logging. - * For example - OMVolumeArgs, OMBucketArgs. - * The implementing class must override toAuditMap() to return an - * instance of Map where both Key and Value are String. - * - * Key: must contain printable US ASCII characters - * May not contain a space, =, ], or " - * If the key is multi word then use camel case. - * - * Value: if it is a collection/array, then it must be converted to a comma - * delimited string - * - * *** AuditAction *** - * This is an interface to define the various type of actions to be audited. - * To ensure separation of concern, for each sub-component you must create an - * Enum to implement AuditAction. - * Structure of Enum can be referred from the test class DummyAction. - * - * For starters, we expect following 3 implementations of AuditAction: - * OMAction - to define action types for Ozone Manager - * SCMAction - to define action types for Storage Container manager - * DNAction - to define action types for Datanode - * - * *** AuditEventStatus *** - * Enum to define Audit event status like success and failure. - * This is used in AuditLogger.logXXX() methods. - * - * * *** AuditLogger *** - * This is where the audit logging magic unfolds. - * The class has 2 Markers defined - READ and WRITE. - * These markers are used to tag when logging events. - * - * *** AuditLoggerType *** - * Enum to define the various AuditLoggers in Ozone - * - * *** AuditMarker *** - * Enum to define various Audit Markers used in AuditLogging. 
- * - * *** AuditMessage *** - * Entity to define an audit message to be logged - * It will generate a message formatted as: - * user=xxx ip=xxx op=XXXX_XXXX {key=val, key1=val1..} ret=XXXXXX - * - * *** Auditor *** - * Interface to mark an actor class as Auditor - * Must be implemented by class where we want to log audit events - * Implementing class must override and implement methods - * buildAuditMessageForSuccess and buildAuditMessageForFailure. - * - * **************************************************************************** - * Usage - * **************************************************************************** - * Using the AuditLogger to log events: - * 1. Get a logger by specifying the appropriate logger type - * Example: ExtendedLogger AUDIT = new AuditLogger(AuditLoggerType.OMLogger) - * - * 2. Construct an instance of AuditMessage - * - * 3. Log Read/Write and Success/Failure event as needed. - * Example - * AUDIT.logWriteSuccess(buildAuditMessageForSuccess(params)) - * - * 4. Log Level implicitly defaults to INFO for xxxxSuccess() and ERROR for - * xxxxFailure() - * AUDIT.logWriteSuccess(buildAuditMessageForSuccess(params)) - * AUDIT.logWriteFailure(buildAuditMessageForSuccess(params)) - * - * See sample invocations in src/test in the following class: - * org.apache.hadoop.ozone.audit.TestOzoneAuditLogger - * - * **************************************************************************** - * Defining new Logger types - * **************************************************************************** - * New Logger type can be added with following steps: - * 1. Update AuditLoggerType to add the new type - * 2. Create new Enum by implementing AuditAction if needed - * 3. Ensure the required entity implements Auditable - * - * **************************************************************************** - * Defining new Marker types - * **************************************************************************** - * New Markers can be configured as follows: - * 1. Define new markers in AuditMarker - * 2. Get the Marker in AuditLogger for use in the log methods, example: - * private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker(); - * 3. Define log methods in AuditLogger to use the new Marker type - * 4. Call these new methods from the required classes to audit with these - * new markers - * 5. The marker based filtering can be configured in log4j2 configurations - * Refer log4j2.properties in src/test/resources for a sample. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java deleted file mode 100644 index 1925c22aa23c3..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
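The package-info above spells out the intended usage flow. As a rough illustration only (none of this is code from the patch), the sketch below follows those steps with made-up names: VolumeOps, logCreateVolume and the audit map are hypothetical, the logger type name is taken verbatim from the package-info, and the AuditMessage construction is left to the Auditor implementation because its builder is not shown in this diff.

import java.util.Map;

import org.apache.hadoop.ozone.audit.AuditAction;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditLoggerType;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.audit.Auditor;

final class AuditUsageSketch {

  // Step 1: one logger per component, as in the package-info example.
  private static final AuditLogger AUDIT =
      new AuditLogger(AuditLoggerType.OMLogger);

  // A component-specific action enum, mirroring the DNAction/SCMAction pattern.
  enum VolumeOps implements AuditAction {
    CREATE_VOLUME;

    @Override
    public String getAction() {
      return this.toString();
    }
  }

  // Steps 2-4: the component implements Auditor, builds an AuditMessage for
  // the action plus its audit map, and logs it. Success defaults to INFO,
  // failure to ERROR, per the package-info.
  static void logCreateVolume(Auditor auditor, Map<String, String> auditMap) {
    AuditMessage msg =
        auditor.buildAuditMessageForSuccess(VolumeOps.CREATE_VOLUME, auditMap);
    AUDIT.logWriteSuccess(msg);
  }
}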
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .KeyBlocks; - -import java.util.ArrayList; -import java.util.List; - -/** - * A group of blocks relations relevant, e.g belong to a certain object key. - */ -public final class BlockGroup { - - private String groupID; - private List blockIDs; - private BlockGroup(String groupID, List blockIDs) { - this.groupID = groupID; - this.blockIDs = blockIDs; - } - - public List getBlockIDList() { - return blockIDs; - } - - public String getGroupID() { - return groupID; - } - - public KeyBlocks getProto() { - KeyBlocks.Builder kbb = KeyBlocks.newBuilder(); - for (BlockID block : blockIDs) { - kbb.addBlocks(block.getProtobuf()); - } - return kbb.setKey(groupID).build(); - } - - /** - * Parses a KeyBlocks proto to a group of blocks. - * @param proto KeyBlocks proto. - * @return a group of blocks. - */ - public static BlockGroup getFromProto(KeyBlocks proto) { - List blockIDs = new ArrayList<>(); - for (HddsProtos.BlockID block : proto.getBlocksList()) { - blockIDs.add(new BlockID(block.getContainerBlockID().getContainerID(), - block.getContainerBlockID().getLocalID())); - } - return BlockGroup.newBuilder().setKeyName(proto.getKey()) - .addAllBlockIDs(blockIDs).build(); - } - - public static Builder newBuilder() { - return new Builder(); - } - - @Override - public String toString() { - return "BlockGroup[" + - "groupID='" + groupID + '\'' + - ", blockIDs=" + blockIDs + - ']'; - } - - /** - * BlockGroup instance builder. - */ - public static class Builder { - - private String groupID; - private List blockIDs; - - public Builder setKeyName(String blockGroupID) { - this.groupID = blockGroupID; - return this; - } - - public Builder addAllBlockIDs(List keyBlocks) { - this.blockIDs = keyBlocks; - return this; - } - - public BlockGroup build() { - return new BlockGroup(groupID, blockIDs); - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java deleted file mode 100644 index 0e70515a492df..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
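For reference, a minimal usage sketch (not part of this patch) of the BlockGroup class removed above: the builder groups block IDs under an object key, and getProto()/getFromProto() round-trip through the KeyBlocks message. The key name and IDs are made-up values.

import java.util.Arrays;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks;
import org.apache.hadoop.ozone.common.BlockGroup;

final class BlockGroupSketch {
  static void roundTrip() {
    // Two blocks (containerID, localID) that belong to the same object key.
    BlockGroup group = BlockGroup.newBuilder()
        .setKeyName("/vol1/bucket1/key1")
        .addAllBlockIDs(Arrays.asList(
            new BlockID(1L, 101L),
            new BlockID(1L, 102L)))
        .build();

    // Serialize to the KeyBlocks proto and parse it back.
    KeyBlocks proto = group.getProto();
    BlockGroup parsed = BlockGroup.getFromProto(proto);
    assert parsed.getBlockIDList().size() == 2;
  }
}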
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.primitives.Longs; - -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ChecksumType; -import org.apache.hadoop.io.MD5Hash; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.util.PureJavaCrc32; -import org.apache.hadoop.util.PureJavaCrc32C; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class to compute and verify checksums for chunks. - * - * This class is not thread safe. - */ -public class Checksum { - - public static final Logger LOG = LoggerFactory.getLogger(Checksum.class); - - private final ChecksumType checksumType; - private final int bytesPerChecksum; - - private PureJavaCrc32 crc32Checksum; - private PureJavaCrc32C crc32cChecksum; - private MessageDigest sha; - - /** - * Constructs a Checksum object. - * @param type type of Checksum - * @param bytesPerChecksum number of bytes of data per checksum - */ - public Checksum(ChecksumType type, int bytesPerChecksum) { - this.checksumType = type; - this.bytesPerChecksum = bytesPerChecksum; - } - - /** - * Constructs a Checksum object with default ChecksumType and default - * BytesPerChecksum. - */ - @VisibleForTesting - public Checksum() { - this.checksumType = ChecksumType.valueOf( - OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT); - this.bytesPerChecksum = OzoneConfigKeys - .OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES; // Default is 1MB - } - - /** - * Computes checksum for give data. - * @param byteBuffer input data in the form of ByteString. - * @return ChecksumData computed for input data. - */ - public ChecksumData computeChecksum(ByteBuffer byteBuffer) - throws OzoneChecksumException { - return computeChecksum(byteBuffer.array(), byteBuffer.position(), - byteBuffer.limit()); - } - - /** - * Computes checksum for give data. - * @param data input data in the form of byte array. - * @return ChecksumData computed for input data. - */ - public ChecksumData computeChecksum(byte[] data) - throws OzoneChecksumException { - return computeChecksum(data, 0, data.length); - } - - /** - * Computes checksum for give data. - * @param data input data in the form of byte array. - * @return ChecksumData computed for input data. 
- */ - public ChecksumData computeChecksum(byte[] data, int offset, int len) - throws OzoneChecksumException { - ChecksumData checksumData = new ChecksumData(this.checksumType, this - .bytesPerChecksum); - if (checksumType == ChecksumType.NONE) { - // Since type is set to NONE, we do not need to compute the checksums - return checksumData; - } - - switch (checksumType) { - case CRC32: - crc32Checksum = new PureJavaCrc32(); - break; - case CRC32C: - crc32cChecksum = new PureJavaCrc32C(); - break; - case SHA256: - try { - sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); - } catch (NoSuchAlgorithmException e) { - throw new OzoneChecksumException(OzoneConsts.FILE_HASH, e); - } - break; - case MD5: - break; - default: - throw new OzoneChecksumException(checksumType); - } - - // Compute number of checksums needs for given data length based on bytes - // per checksum. - int dataSize = len - offset; - int numChecksums = (dataSize + bytesPerChecksum - 1) / bytesPerChecksum; - - // Checksum is computed for each bytesPerChecksum number of bytes of data - // starting at offset 0. The last checksum might be computed for the - // remaining data with length less than bytesPerChecksum. - List checksumList = new ArrayList<>(numChecksums); - for (int index = 0; index < numChecksums; index++) { - checksumList.add(computeChecksumAtIndex(data, index, offset, len)); - } - checksumData.setChecksums(checksumList); - - return checksumData; - } - - /** - * Computes checksum based on checksumType for a data block at given index - * and a max length of bytesPerChecksum. - * @param data input data - * @param index index to compute the offset from where data must be read - * @param start start pos of the array where the computation has to start - * @length length of array till which checksum needs to be computed - * @return computed checksum ByteString - * @throws OzoneChecksumException thrown when ChecksumType is not recognized - */ - private ByteString computeChecksumAtIndex(byte[] data, int index, int start, - int length) - throws OzoneChecksumException { - int offset = start + index * bytesPerChecksum; - int dataLength = length - start; - int len = bytesPerChecksum; - if ((offset + len) > dataLength) { - len = dataLength - offset; - } - byte[] checksumBytes = null; - switch (checksumType) { - case CRC32: - checksumBytes = computeCRC32Checksum(data, offset, len); - break; - case CRC32C: - checksumBytes = computeCRC32CChecksum(data, offset, len); - break; - case SHA256: - checksumBytes = computeSHA256Checksum(data, offset, len); - break; - case MD5: - checksumBytes = computeMD5Checksum(data, offset, len); - break; - default: - throw new OzoneChecksumException(checksumType); - } - - return ByteString.copyFrom(checksumBytes); - } - - /** - * Computes CRC32 checksum. - */ - private byte[] computeCRC32Checksum(byte[] data, int offset, int len) { - crc32Checksum.reset(); - crc32Checksum.update(data, offset, len); - return Longs.toByteArray(crc32Checksum.getValue()); - } - - /** - * Computes CRC32C checksum. - */ - private byte[] computeCRC32CChecksum(byte[] data, int offset, int len) { - crc32cChecksum.reset(); - crc32cChecksum.update(data, offset, len); - return Longs.toByteArray(crc32cChecksum.getValue()); - } - - /** - * Computes SHA-256 checksum. - */ - private byte[] computeSHA256Checksum(byte[] data, int offset, int len) { - sha.reset(); - sha.update(data, offset, len); - return sha.digest(); - } - - /** - * Computes MD5 checksum. 
- */ - private byte[] computeMD5Checksum(byte[] data, int offset, int len) { - MD5Hash md5out = MD5Hash.digest(data, offset, len); - return md5out.getDigest(); - } - - /** - * Computes the ChecksumData for the input data and verifies that it - * matches with that of the input checksumData, starting from index - * startIndex. - * @param byteString input data - * @param checksumData checksumData to match with - * @param startIndex index of first checksum in checksumData to match with - * data's computed checksum. - * @throws OzoneChecksumException is thrown if checksums do not match - */ - public static boolean verifyChecksum(ByteString byteString, - ChecksumData checksumData, int startIndex) throws OzoneChecksumException { - return verifyChecksum(byteString.toByteArray(), checksumData, startIndex); - } - - /** - * Computes the ChecksumData for the input data and verifies that it - * matches with that of the input checksumData. - * @param data input data - * @param checksumData checksumData to match with - * @throws OzoneChecksumException is thrown if checksums do not match - */ - public static boolean verifyChecksum(byte[] data, ChecksumData checksumData) - throws OzoneChecksumException { - return verifyChecksum(data, checksumData, 0); - } - - /** - * Computes the ChecksumData for the input data and verifies that it - * matches with that of the input checksumData. - * @param data input data - * @param checksumData checksumData to match with - * @param startIndex index of first checksum in checksumData to match with - * data's computed checksum. - * @throws OzoneChecksumException is thrown if checksums do not match - */ - public static boolean verifyChecksum(byte[] data, ChecksumData checksumData, - int startIndex) throws OzoneChecksumException { - ChecksumType checksumType = checksumData.getChecksumType(); - if (checksumType == ChecksumType.NONE) { - // Checksum is set to NONE. No further verification is required. - return true; - } - - int bytesPerChecksum = checksumData.getBytesPerChecksum(); - Checksum checksum = new Checksum(checksumType, bytesPerChecksum); - ChecksumData computedChecksumData = - checksum.computeChecksum(data, 0, data.length); - - return checksumData.verifyChecksumDataMatches(computedChecksumData, - startIndex); - } - - /** - * Returns a ChecksumData with type NONE for testing. - */ - @VisibleForTesting - public static ContainerProtos.ChecksumData getNoChecksumDataProto() { - return new ChecksumData(ChecksumType.NONE, 0).getProtoBufMessage(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java deleted file mode 100644 index 7ce643db4711c..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
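A short usage sketch (illustrative only, not code from this patch) of the Checksum class removed above: data is checksummed in bytesPerChecksum-sized chunks, so the number of stored checksums is the ceiling of dataSize / bytesPerChecksum, and verification recomputes the same chunked checksums and compares them entry by entry.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.common.ChecksumData;
import org.apache.hadoop.ozone.common.OzoneChecksumException;

final class ChecksumSketch {
  static void computeAndVerify() throws OzoneChecksumException {
    byte[] data = "0123456789".getBytes(StandardCharsets.UTF_8);   // 10 bytes

    // 4 bytes per checksum -> (10 + 4 - 1) / 4 = 3 CRC32 checksums.
    Checksum checksum = new Checksum(ChecksumType.CRC32, 4);
    ChecksumData checksumData = checksum.computeChecksum(data);

    // Recomputes over the same chunking; throws OzoneChecksumException
    // at the first mismatching chunk, returns true otherwise.
    Checksum.verifyChecksum(data, checksumData);
  }
}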
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Some portions of this file Copyright (c) 2004-2006 Intel Corportation - * and licensed under the BSD license. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.ratis.util.Preconditions; - -import java.nio.ByteBuffer; -import java.util.zip.Checksum; - -/** - * A sub-interface of {@link Checksum} - * with a method to update checksum from a {@link ByteBuffer}. - */ -public interface ChecksumByteBuffer extends Checksum { - /** - * Updates the current checksum with the specified bytes in the buffer. - * Upon return, the buffer's position will be equal to its limit. - * - * @param buffer the bytes to update the checksum with - */ - void update(ByteBuffer buffer); - - @Override - default void update(byte[] b, int off, int len) { - update(ByteBuffer.wrap(b, off, len).asReadOnlyBuffer()); - } - - /** - * An abstract class implementing {@link ChecksumByteBuffer} - * with a 32-bit checksum and a lookup table. - */ - @SuppressWarnings("innerassignment") - abstract class CrcIntTable implements ChecksumByteBuffer { - /** Current CRC value with bit-flipped. */ - private int crc; - - CrcIntTable() { - reset(); - Preconditions.assertTrue(getTable().length == 8 * (1 << 8)); - } - - abstract int[] getTable(); - - @Override - public final long getValue() { - return (~crc) & 0xffffffffL; - } - - @Override - public final void reset() { - crc = 0xffffffff; - } - - @Override - public final void update(int b) { - crc = (crc >>> 8) ^ getTable()[(((crc ^ b) << 24) >>> 24)]; - } - - @Override - public final void update(ByteBuffer b) { - crc = update(crc, b, getTable()); - } - - private static int update(int crc, ByteBuffer b, int[] table) { - for(; b.remaining() > 7;) { - final int c0 = (b.get() ^ crc) & 0xff; - final int c1 = (b.get() ^ (crc >>>= 8)) & 0xff; - final int c2 = (b.get() ^ (crc >>>= 8)) & 0xff; - final int c3 = (b.get() ^ (crc >>> 8)) & 0xff; - crc = (table[0x700 + c0] ^ table[0x600 + c1]) - ^ (table[0x500 + c2] ^ table[0x400 + c3]); - - final int c4 = b.get() & 0xff; - final int c5 = b.get() & 0xff; - final int c6 = b.get() & 0xff; - final int c7 = b.get() & 0xff; - - crc ^= (table[0x300 + c4] ^ table[0x200 + c5]) - ^ (table[0x100 + c6] ^ table[c7]); - } - - // loop unroll - duff's device style - switch (b.remaining()) { - case 7: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 6: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 5: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 4: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 3: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 2: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 1: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - default: // noop - } - - return crc; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java deleted file mode 100644 index 4a927fbae6cd9..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java +++ /dev/null @@ -1,206 +0,0 
@@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import java.util.List; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ChecksumType; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; - -/** - * Java class that represents Checksum ProtoBuf class. This helper class allows - * us to convert to and from protobuf to normal java. - */ -public class ChecksumData { - - private ChecksumType type; - // Checksum will be computed for every bytesPerChecksum number of bytes and - // stored sequentially in checksumList - private int bytesPerChecksum; - private List checksums; - - public ChecksumData(ChecksumType checksumType, int bytesPerChecksum) { - this(checksumType, bytesPerChecksum, Lists.newArrayList()); - } - - public ChecksumData(ChecksumType checksumType, int bytesPerChecksum, - List checksums) { - this.type = checksumType; - this.bytesPerChecksum = bytesPerChecksum; - this.checksums = checksums; - } - - /** - * Getter method for checksumType. - */ - public ChecksumType getChecksumType() { - return this.type; - } - - /** - * Getter method for bytesPerChecksum. - */ - public int getBytesPerChecksum() { - return this.bytesPerChecksum; - } - - /** - * Getter method for checksums. - */ - @VisibleForTesting - public List getChecksums() { - return this.checksums; - } - - /** - * Setter method for checksums. - * @param checksumList list of checksums - */ - public void setChecksums(List checksumList) { - this.checksums.clear(); - this.checksums.addAll(checksumList); - } - - /** - * Construct the Checksum ProtoBuf message. - * @return Checksum ProtoBuf message - */ - public ContainerProtos.ChecksumData getProtoBufMessage() { - ContainerProtos.ChecksumData.Builder checksumProtoBuilder = - ContainerProtos.ChecksumData.newBuilder() - .setType(this.type) - .setBytesPerChecksum(this.bytesPerChecksum); - - checksumProtoBuilder.addAllChecksums(checksums); - - return checksumProtoBuilder.build(); - } - - /** - * Constructs Checksum class object from the Checksum ProtoBuf message. 
- * @param checksumDataProto Checksum ProtoBuf message - * @return ChecksumData object representing the proto - */ - public static ChecksumData getFromProtoBuf( - ContainerProtos.ChecksumData checksumDataProto) { - Preconditions.checkNotNull(checksumDataProto); - - ChecksumData checksumData = new ChecksumData( - checksumDataProto.getType(), checksumDataProto.getBytesPerChecksum()); - - if (checksumDataProto.getChecksumsCount() != 0) { - checksumData.setChecksums(checksumDataProto.getChecksumsList()); - } - - return checksumData; - } - - /** - * Verify that this ChecksumData from startIndex to endIndex matches with the - * provided ChecksumData. - * The checksum at startIndex of this ChecksumData will be matched with the - * checksum at index 0 of the provided ChecksumData, and checksum at - * (startIndex + 1) of this ChecksumData with checksum at index 1 of - * provided ChecksumData and so on. - * @param that the ChecksumData to match with - * @param startIndex index of the first checksum from this ChecksumData - * which will be used to compare checksums - * @return true if checksums match - * @throws OzoneChecksumException - */ - public boolean verifyChecksumDataMatches(ChecksumData that, int startIndex) - throws OzoneChecksumException { - - // pre checks - if (this.checksums.size() == 0) { - throw new OzoneChecksumException("Original checksumData has no " + - "checksums"); - } - - if (that.checksums.size() == 0) { - throw new OzoneChecksumException("Computed checksumData has no " + - "checksums"); - } - - int numChecksums = that.checksums.size(); - - try { - // Verify that checksum matches at each index - for (int index = 0; index < numChecksums; index++) { - if (!matchChecksumAtIndex(this.checksums.get(startIndex + index), - that.checksums.get(index))) { - // checksum mismatch. throw exception. - throw new OzoneChecksumException(index); - } - } - } catch (ArrayIndexOutOfBoundsException e) { - throw new OzoneChecksumException("Computed checksum has " - + numChecksums + " number of checksums. 
Original checksum has " + - (this.checksums.size() - startIndex) + " number of checksums " + - "starting from index " + startIndex); - } - return true; - } - - private static boolean matchChecksumAtIndex( - ByteString expectedChecksumAtIndex, ByteString computedChecksumAtIndex) { - return expectedChecksumAtIndex.equals(computedChecksumAtIndex); - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof ChecksumData)) { - return false; - } - - ChecksumData that = (ChecksumData) obj; - - if (!this.type.equals(that.getChecksumType())) { - return false; - } - if (this.bytesPerChecksum != that.getBytesPerChecksum()) { - return false; - } - if (this.checksums.size() != that.checksums.size()) { - return false; - } - - // Match checksum at each index - for (int index = 0; index < this.checksums.size(); index++) { - if (!matchChecksumAtIndex(this.checksums.get(index), - that.checksums.get(index))) { - return false; - } - } - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder hc = new HashCodeBuilder(); - hc.append(type); - hc.append(bytesPerChecksum); - hc.append(checksums.toArray()); - return hc.toHashCode(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java deleted file mode 100644 index 892b6951534a9..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
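To make the startIndex semantics of verifyChecksumDataMatches concrete, here is a small sketch (not from this patch; the chunk size and data layout are assumptions). The stored ChecksumData covers a whole chunk, the computed one covers only its tail, so stored checksum startIndex + i is compared against computed checksum i; the proto round trip at the end uses the getProtoBufMessage()/getFromProtoBuf() pair shown above.

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.common.ChecksumData;
import org.apache.hadoop.ozone.common.OzoneChecksumException;

final class PartialVerifySketch {
  // Assumes tail holds the bytes of wholeChunk from offset 2 * 4 onwards,
  // i.e. exactly the data covered by stored checksums 2, 3, ...
  static boolean verifyTail(byte[] wholeChunk, byte[] tail)
      throws OzoneChecksumException {
    Checksum checksum = new Checksum(ChecksumType.CRC32C, 4);
    ChecksumData stored = checksum.computeChecksum(wholeChunk);
    ChecksumData computed = checksum.computeChecksum(tail);

    // stored[2] is compared with computed[0], stored[3] with computed[1], ...
    boolean matches = stored.verifyChecksumDataMatches(computed, 2);

    // Proto round trip, e.g. for shipping checksums along with a chunk.
    ContainerProtos.ChecksumData proto = stored.getProtoBufMessage();
    ChecksumData parsed = ChecksumData.getFromProtoBuf(proto);
    return matches && parsed.equals(stored);
  }
}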
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .DeleteScmBlockResult; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .DeleteScmBlockResult.Result; - -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -/** - * Result to delete a group of blocks. - */ -public class DeleteBlockGroupResult { - private String objectKey; - private List blockResultList; - public DeleteBlockGroupResult(String objectKey, - List blockResultList) { - this.objectKey = objectKey; - this.blockResultList = blockResultList; - } - - public String getObjectKey() { - return objectKey; - } - - public List getBlockResultList() { - return blockResultList; - } - - public List getBlockResultProtoList() { - List resultProtoList = - new ArrayList<>(blockResultList.size()); - for (DeleteBlockResult result : blockResultList) { - DeleteScmBlockResult proto = DeleteScmBlockResult.newBuilder() - .setBlockID(result.getBlockID().getProtobuf()) - .setResult(result.getResult()).build(); - resultProtoList.add(proto); - } - return resultProtoList; - } - - public static List convertBlockResultProto( - List results) { - List protoResults = new ArrayList<>(results.size()); - for (DeleteScmBlockResult result : results) { - protoResults.add(new DeleteBlockResult(BlockID.getFromProtobuf( - result.getBlockID()), result.getResult())); - } - return protoResults; - } - - /** - * Only if all blocks are successfully deleted, this group is considered - * to be successfully executed. - * - * @return true if all blocks are successfully deleted, false otherwise. - */ - public boolean isSuccess() { - for (DeleteBlockResult result : blockResultList) { - if (result.getResult() != Result.success) { - return false; - } - } - return true; - } - - /** - * @return A list of deletion failed block IDs. - */ - public List getFailedBlocks() { - List failedBlocks = blockResultList.stream() - .filter(result -> result.getResult() != Result.success) - .map(DeleteBlockResult::getBlockID).collect(Collectors.toList()); - return failedBlocks; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java deleted file mode 100644 index 518b519478157..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
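A small sketch (not part of this patch) of how DeleteBlockGroupResult summarizes per-block outcomes; the block IDs, key name and the non-success value Result.unknownFailure are assumptions for illustration, since only Result.success appears in the code above.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result;
import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;

final class DeleteResultSketch {
  static void inspect() {
    // One successful and one failed per-block result for the same object key.
    // Any value other than Result.success marks the block as failed.
    List<DeleteBlockResult> results = Arrays.asList(
        new DeleteBlockResult(new BlockID(1L, 101L), Result.success),
        new DeleteBlockResult(new BlockID(1L, 102L), Result.unknownFailure));

    DeleteBlockGroupResult group =
        new DeleteBlockGroupResult("/vol1/bucket1/key1", results);

    boolean allDeleted = group.isSuccess();          // false: one block failed
    List<BlockID> failed = group.getFailedBlocks();  // contains block 102
  }
}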
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -import java.io.File; -import java.io.IOException; - -/** - * The exception is thrown when file system state is inconsistent - * and is not recoverable. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class InconsistentStorageStateException extends IOException { - private static final long serialVersionUID = 1L; - - public InconsistentStorageStateException(String descr) { - super(descr); - } - - public InconsistentStorageStateException(File dir, String descr) { - super("Directory " + getFilePath(dir) + " is in an inconsistent state: " - + descr); - } - - private static String getFilePath(File dir) { - try { - return dir.getCanonicalPath(); - } catch (IOException e) { - } - return dir.getPath(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java deleted file mode 100644 index 20e40af09f3ed..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import java.io.IOException; -import java.security.NoSuchAlgorithmException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -/** Thrown for checksum errors. */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class OzoneChecksumException extends IOException { - - /** - * OzoneChecksumException to throw when checksum verfication fails. - * @param index checksum list index at which checksum match failed - */ - public OzoneChecksumException(int index) { - super(String.format("Checksum mismatch at index %d", index)); - } - - /** - * OzoneChecksumException to throw when unrecognized checksumType is given. 
- * @param unrecognizedChecksumType - */ - public OzoneChecksumException( - ContainerProtos.ChecksumType unrecognizedChecksumType) { - super(String.format("Unrecognized ChecksumType: %s", - unrecognizedChecksumType)); - } - - /** - * OzoneChecksumException to wrap around NoSuchAlgorithmException. - * @param algorithm name of algorithm - * @param ex original exception thrown - */ - public OzoneChecksumException( - String algorithm, NoSuchAlgorithmException ex) { - super(String.format("NoSuchAlgorithmException thrown while computing " + - "SHA-256 checksum using algorithm %s", algorithm), ex); - } - - /** - * OzoneChecksumException to throw with custom message. - */ - public OzoneChecksumException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java deleted file mode 100644 index 0d1f6307501a3..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java +++ /dev/null @@ -1,556 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -/** - * Similar to {@link org.apache.hadoop.util.PureJavaCrc32} - * except that this class implement {@link ChecksumByteBuffer}. - */ -final class PureJavaCrc32ByteBuffer extends ChecksumByteBuffer.CrcIntTable { - @Override - int[] getTable() { - return T; - } - - /** - * CRC-32 lookup table generated by the polynomial 0xEDB88320. - * See also org.apache.hadoop.util.TestPureJavaCrc32.Table. 
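The lookup table that follows is described as generated from the reflected CRC-32 polynomial 0xEDB88320. As a self-contained illustration (not the removed slicing-by-8 code), the sketch below shows how a single 256-entry table of this kind is generated and used one byte at a time; its first entries (0x00000000, 0x77073096, 0xEE0E612C, ...) match the T8_0 block below.

/**
 * Minimal single-table CRC-32 sketch. PureJavaCrc32ByteBuffer uses eight such
 * tables (slicing-by-8) to process 8 bytes per iteration, but the per-byte
 * update is the same idea: crc = (crc >>> 8) ^ table[(crc ^ b) & 0xff].
 */
final class Crc32TableSketch {
  private static final int POLY = 0xEDB88320;   // reflected CRC-32 polynomial
  private static final int[] TABLE = new int[256];

  static {
    for (int i = 0; i < 256; i++) {
      int crc = i;
      for (int bit = 0; bit < 8; bit++) {
        crc = (crc & 1) != 0 ? (crc >>> 1) ^ POLY : crc >>> 1;
      }
      TABLE[i] = crc;                           // TABLE[1] == 0x77073096, etc.
    }
  }

  static long crc32(byte[] data) {
    int crc = 0xFFFFFFFF;                       // bit-flipped start, as in reset()
    for (byte b : data) {
      crc = (crc >>> 8) ^ TABLE[(crc ^ b) & 0xff];
    }
    return (~crc) & 0xFFFFFFFFL;                // final flip, as in getValue()
  }
}

Slicing-by-8 trades eight times the table space for fewer dependent lookups per byte, which is what the unrolled update loop in CrcIntTable above exploits.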
- */ - private static final int[] T = { - /* T8_0 */ - 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, - 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, - 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, - 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, - 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, - 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, - 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, - 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, - 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, - 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, - 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, - 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, - 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, - 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, - 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, - 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, - 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, - 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, - 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, - 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, - 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, - 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, - 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, - 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, - 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, - 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, - 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, - 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, - 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, - 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, - 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, - 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, - 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, - 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, - 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, - 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, - 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, - 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, - 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, - 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, - 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, - 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, - 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, - 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, - 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, - 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, - 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, - 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, - 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, - 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, - 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, - 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, - 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, - 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, - 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, - 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, - 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, - 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, - 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, - 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, - 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, - 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, - 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, - 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D, - /* T8_1 */ - 0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3, - 0x646CC504, 0x7D77F445, 0x565AA786, 0x4F4196C7, - 0xC8D98A08, 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB, - 0xACB54F0C, 0xB5AE7E4D, 0x9E832D8E, 0x87981CCF, - 0x4AC21251, 0x53D92310, 0x78F470D3, 0x61EF4192, - 0x2EAED755, 0x37B5E614, 0x1C98B5D7, 
0x05838496, - 0x821B9859, 0x9B00A918, 0xB02DFADB, 0xA936CB9A, - 0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E, - 0x958424A2, 0x8C9F15E3, 0xA7B24620, 0xBEA97761, - 0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265, - 0x5D5DAEAA, 0x44469FEB, 0x6F6BCC28, 0x7670FD69, - 0x39316BAE, 0x202A5AEF, 0x0B07092C, 0x121C386D, - 0xDF4636F3, 0xC65D07B2, 0xED705471, 0xF46B6530, - 0xBB2AF3F7, 0xA231C2B6, 0x891C9175, 0x9007A034, - 0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38, - 0x73F379FF, 0x6AE848BE, 0x41C51B7D, 0x58DE2A3C, - 0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6, - 0x94158A01, 0x8D0EBB40, 0xA623E883, 0xBF38D9C2, - 0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE, - 0x5CCC0009, 0x45D73148, 0x6EFA628B, 0x77E153CA, - 0xBABB5D54, 0xA3A06C15, 0x888D3FD6, 0x91960E97, - 0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93, - 0x7262D75C, 0x6B79E61D, 0x4054B5DE, 0x594F849F, - 0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B, - 0x65FD6BA7, 0x7CE65AE6, 0x57CB0925, 0x4ED03864, - 0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60, - 0xAD24E1AF, 0xB43FD0EE, 0x9F12832D, 0x8609B26C, - 0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768, - 0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35, - 0x4B53BCF2, 0x52488DB3, 0x7965DE70, 0x607EEF31, - 0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D, - 0x838A36FA, 0x9A9107BB, 0xB1BC5478, 0xA8A76539, - 0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88, - 0x5FEF5D4F, 0x46F46C0E, 0x6DD93FCD, 0x74C20E8C, - 0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180, - 0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484, - 0x71418A1A, 0x685ABB5B, 0x4377E898, 0x5A6CD9D9, - 0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD, - 0xB9980012, 0xA0833153, 0x8BAE6290, 0x92B553D1, - 0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5, - 0xAE07BCE9, 0xB71C8DA8, 0x9C31DE6B, 0x852AEF2A, - 0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E, - 0x66DE36E1, 0x7FC507A0, 0x54E85463, 0x4DF36522, - 0x02B2F3E5, 0x1BA9C2A4, 0x30849167, 0x299FA026, - 0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B, - 0x80A96BBC, 0x99B25AFD, 0xB29F093E, 0xAB84387F, - 0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773, - 0x4870E1B4, 0x516BD0F5, 0x7A468336, 0x635DB277, - 0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D, - 0xAF96124A, 0xB68D230B, 0x9DA070C8, 0x84BB4189, - 0x03235D46, 0x1A386C07, 0x31153FC4, 0x280E0E85, - 0x674F9842, 0x7E54A903, 0x5579FAC0, 0x4C62CB81, - 0x8138C51F, 0x9823F45E, 0xB30EA79D, 0xAA1596DC, - 0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8, - 0x49E14F17, 0x50FA7E56, 0x7BD72D95, 0x62CC1CD4, - 0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0, - 0x5E7EF3EC, 0x4765C2AD, 0x6C48916E, 0x7553A02F, - 0x3A1236E8, 0x230907A9, 0x0824546A, 0x113F652B, - 0x96A779E4, 0x8FBC48A5, 0xA4911B66, 0xBD8A2A27, - 0xF2CBBCE0, 0xEBD08DA1, 0xC0FDDE62, 0xD9E6EF23, - 0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, 0x3F91B27E, - 0x70D024B9, 0x69CB15F8, 0x42E6463B, 0x5BFD777A, - 0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876, - 0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72, - /* T8_2 */ - 0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59, - 0x0709A8DC, 0x06CBC2EB, 0x048D7CB2, 0x054F1685, - 0x0E1351B8, 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1, - 0x091AF964, 0x08D89353, 0x0A9E2D0A, 0x0B5C473D, - 0x1C26A370, 0x1DE4C947, 0x1FA2771E, 0x1E601D29, - 0x1B2F0BAC, 0x1AED619B, 0x18ABDFC2, 0x1969B5F5, - 0x1235F2C8, 0x13F798FF, 0x11B126A6, 0x10734C91, - 0x153C5A14, 0x14FE3023, 0x16B88E7A, 0x177AE44D, - 0x384D46E0, 0x398F2CD7, 0x3BC9928E, 0x3A0BF8B9, - 0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065, - 0x365E1758, 0x379C7D6F, 0x35DAC336, 0x3418A901, - 0x3157BF84, 0x3095D5B3, 0x32D36BEA, 0x331101DD, - 0x246BE590, 0x25A98FA7, 
0x27EF31FE, 0x262D5BC9, - 0x23624D4C, 0x22A0277B, 0x20E69922, 0x2124F315, - 0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71, - 0x2D711CF4, 0x2CB376C3, 0x2EF5C89A, 0x2F37A2AD, - 0x709A8DC0, 0x7158E7F7, 0x731E59AE, 0x72DC3399, - 0x7793251C, 0x76514F2B, 0x7417F172, 0x75D59B45, - 0x7E89DC78, 0x7F4BB64F, 0x7D0D0816, 0x7CCF6221, - 0x798074A4, 0x78421E93, 0x7A04A0CA, 0x7BC6CAFD, - 0x6CBC2EB0, 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9, - 0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835, - 0x62AF7F08, 0x636D153F, 0x612BAB66, 0x60E9C151, - 0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D, - 0x48D7CB20, 0x4915A117, 0x4B531F4E, 0x4A917579, - 0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5, - 0x46C49A98, 0x4706F0AF, 0x45404EF6, 0x448224C1, - 0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D, - 0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609, - 0x53F8C08C, 0x523AAABB, 0x507C14E2, 0x51BE7ED5, - 0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1, - 0x5DEB9134, 0x5C29FB03, 0x5E6F455A, 0x5FAD2F6D, - 0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9, - 0xE63CB35C, 0xE7FED96B, 0xE5B86732, 0xE47A0D05, - 0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461, - 0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD, - 0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E, 0xFF5506A9, - 0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75, - 0xF300E948, 0xF2C2837F, 0xF0843D26, 0xF1465711, - 0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD, - 0xD9785D60, 0xD8BA3757, 0xDAFC890E, 0xDB3EE339, - 0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5, - 0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, 0xD52DB281, - 0xD062A404, 0xD1A0CE33, 0xD3E6706A, 0xD2241A5D, - 0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049, - 0xC25756CC, 0xC3953CFB, 0xC1D382A2, 0xC011E895, - 0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1, - 0xCC440774, 0xCD866D43, 0xCFC0D31A, 0xCE02B92D, - 0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819, - 0x96A63E9C, 0x976454AB, 0x9522EAF2, 0x94E080C5, - 0x9FBCC7F8, 0x9E7EADCF, 0x9C381396, 0x9DFA79A1, - 0x98B56F24, 0x99770513, 0x9B31BB4A, 0x9AF3D17D, - 0x8D893530, 0x8C4B5F07, 0x8E0DE15E, 0x8FCF8B69, - 0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5, - 0x839A6488, 0x82580EBF, 0x801EB0E6, 0x81DCDAD1, - 0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D, - 0xA9E2D0A0, 0xA820BA97, 0xAA6604CE, 0xABA46EF9, - 0xAEEB787C, 0xAF29124B, 0xAD6FAC12, 0xACADC625, - 0xA7F18118, 0xA633EB2F, 0xA4755576, 0xA5B73F41, - 0xA0F829C4, 0xA13A43F3, 0xA37CFDAA, 0xA2BE979D, - 0xB5C473D0, 0xB40619E7, 0xB640A7BE, 0xB782CD89, - 0xB2CDDB0C, 0xB30FB13B, 0xB1490F62, 0xB08B6555, - 0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31, - 0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED, - /* T8_3 */ - 0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE, - 0x8F629757, 0x37DEF032, 0x256B5FDC, 0x9DD738B9, - 0xC5B428EF, 0x7D084F8A, 0x6FBDE064, 0xD7018701, - 0x4AD6BFB8, 0xF26AD8DD, 0xE0DF7733, 0x58631056, - 0x5019579F, 0xE8A530FA, 0xFA109F14, 0x42ACF871, - 0xDF7BC0C8, 0x67C7A7AD, 0x75720843, 0xCDCE6F26, - 0x95AD7F70, 0x2D111815, 0x3FA4B7FB, 0x8718D09E, - 0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9, - 0xA032AF3E, 0x188EC85B, 0x0A3B67B5, 0xB28700D0, - 0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787, - 0x658687D1, 0xDD3AE0B4, 0xCF8F4F5A, 0x7733283F, - 0xEAE41086, 0x525877E3, 0x40EDD80D, 0xF851BF68, - 0xF02BF8A1, 0x48979FC4, 0x5A22302A, 0xE29E574F, - 0x7F496FF6, 0xC7F50893, 0xD540A77D, 0x6DFCC018, - 0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0, - 0xBAFD4719, 0x0241207C, 0x10F48F92, 0xA848E8F7, - 0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3, - 0x1476CF6A, 0xACCAA80F, 0xBE7F07E1, 0x06C36084, - 0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C, - 0xD1C2E785, 
0x697E80E0, 0x7BCB2F0E, 0xC377486B, - 0xCB0D0FA2, 0x73B168C7, 0x6104C729, 0xD9B8A04C, - 0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B, - 0x0EB9274D, 0xB6054028, 0xA4B0EFC6, 0x1C0C88A3, - 0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4, - 0x3B26F703, 0x839A9066, 0x912F3F88, 0x299358ED, - 0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA, - 0xFE92DFEC, 0x462EB889, 0x549B1767, 0xEC277002, - 0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755, - 0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72, - 0xE45D37CB, 0x5CE150AE, 0x4E54FF40, 0xF6E89825, - 0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D, - 0x21E91F24, 0x99557841, 0x8BE0D7AF, 0x335CB0CA, - 0xED59B63B, 0x55E5D15E, 0x47507EB0, 0xFFEC19D5, - 0x623B216C, 0xDA874609, 0xC832E9E7, 0x708E8E82, - 0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A, - 0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D, - 0xBD40E1A4, 0x05FC86C1, 0x1749292F, 0xAFF54E4A, - 0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D, - 0x78F4C94B, 0xC048AE2E, 0xD2FD01C0, 0x6A4166A5, - 0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2, - 0x4D6B1905, 0xF5D77E60, 0xE762D18E, 0x5FDEB6EB, - 0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC, - 0x88DF31EA, 0x3063568F, 0x22D6F961, 0x9A6A9E04, - 0x07BDA6BD, 0xBF01C1D8, 0xADB46E36, 0x15080953, - 0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174, - 0x9210D9CD, 0x2AACBEA8, 0x38191146, 0x80A57623, - 0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B, - 0x57A4F122, 0xEF189647, 0xFDAD39A9, 0x45115ECC, - 0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8, - 0xF92F7951, 0x41931E34, 0x5326B1DA, 0xEB9AD6BF, - 0xB3F9C6E9, 0x0B45A18C, 0x19F00E62, 0xA14C6907, - 0x3C9B51BE, 0x842736DB, 0x96929935, 0x2E2EFE50, - 0x2654B999, 0x9EE8DEFC, 0x8C5D7112, 0x34E11677, - 0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120, - 0xE3E09176, 0x5B5CF613, 0x49E959FD, 0xF1553E98, - 0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF, - 0xD67F4138, 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6, - 0x591DD66F, 0xE1A1B10A, 0xF3141EE4, 0x4BA87981, - 0x13CB69D7, 0xAB770EB2, 0xB9C2A15C, 0x017EC639, - 0x9CA9FE80, 0x241599E5, 0x36A0360B, 0x8E1C516E, - 0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, 0x94D3B949, - 0x090481F0, 0xB1B8E695, 0xA30D497B, 0x1BB12E1E, - 0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6, - 0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1, - /* T8_4 */ - 0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0, - 0xF580A6C0, 0xC8E08F70, 0x8F40F5A0, 0xB220DC10, - 0x30704BC1, 0x0D106271, 0x4AB018A1, 0x77D03111, - 0xC5F0ED01, 0xF890C4B1, 0xBF30BE61, 0x825097D1, - 0x60E09782, 0x5D80BE32, 0x1A20C4E2, 0x2740ED52, - 0x95603142, 0xA80018F2, 0xEFA06222, 0xD2C04B92, - 0x5090DC43, 0x6DF0F5F3, 0x2A508F23, 0x1730A693, - 0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053, - 0xC1C12F04, 0xFCA106B4, 0xBB017C64, 0x866155D4, - 0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314, - 0xF1B164C5, 0xCCD14D75, 0x8B7137A5, 0xB6111E15, - 0x0431C205, 0x3951EBB5, 0x7EF19165, 0x4391B8D5, - 0xA121B886, 0x9C419136, 0xDBE1EBE6, 0xE681C256, - 0x54A11E46, 0x69C137F6, 0x2E614D26, 0x13016496, - 0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997, - 0x64D15587, 0x59B17C37, 0x1E1106E7, 0x23712F57, - 0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299, - 0xAD73FE89, 0x9013D739, 0xD7B3ADE9, 0xEAD38459, - 0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958, - 0x9D03B548, 0xA0639CF8, 0xE7C3E628, 0xDAA3CF98, - 0x3813CFCB, 0x0573E67B, 0x42D39CAB, 0x7FB3B51B, - 0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB, - 0x0863840A, 0x3503ADBA, 0x72A3D76A, 0x4FC3FEDA, - 0xFDE322CA, 0xC0830B7A, 0x872371AA, 0xBA43581A, - 0x9932774D, 0xA4525EFD, 0xE3F2242D, 0xDE920D9D, - 0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D, - 
0xA9423C8C, 0x9422153C, 0xD3826FEC, 0xEEE2465C, - 0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C, - 0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F, - 0x0C52460F, 0x31326FBF, 0x7692156F, 0x4BF23CDF, - 0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE, - 0x3C220DCE, 0x0142247E, 0x46E25EAE, 0x7B82771E, - 0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42, - 0x44661652, 0x79063FE2, 0x3EA64532, 0x03C66C82, - 0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183, - 0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743, - 0xD1062710, 0xEC660EA0, 0xABC67470, 0x96A65DC0, - 0x248681D0, 0x19E6A860, 0x5E46D2B0, 0x6326FB00, - 0xE1766CD1, 0xDC164561, 0x9BB63FB1, 0xA6D61601, - 0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1, - 0x70279F96, 0x4D47B626, 0x0AE7CCF6, 0x3787E546, - 0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386, - 0x4057D457, 0x7D37FDE7, 0x3A978737, 0x07F7AE87, - 0xB5D77297, 0x88B75B27, 0xCF1721F7, 0xF2770847, - 0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4, - 0xE547AED4, 0xD8278764, 0x9F87FDB4, 0xA2E7D404, - 0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905, - 0xD537E515, 0xE857CCA5, 0xAFF7B675, 0x92979FC5, - 0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B, - 0x1C954E1B, 0x21F567AB, 0x66551D7B, 0x5B3534CB, - 0xD965A31A, 0xE4058AAA, 0xA3A5F07A, 0x9EC5D9CA, - 0x2CE505DA, 0x11852C6A, 0x562556BA, 0x6B457F0A, - 0x89F57F59, 0xB49556E9, 0xF3352C39, 0xCE550589, - 0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349, - 0xB9853498, 0x84E51D28, 0xC34567F8, 0xFE254E48, - 0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888, - 0x28D4C7DF, 0x15B4EE6F, 0x521494BF, 0x6F74BD0F, - 0xDD54611F, 0xE03448AF, 0xA794327F, 0x9AF41BCF, - 0x18A48C1E, 0x25C4A5AE, 0x6264DF7E, 0x5F04F6CE, - 0xED242ADE, 0xD044036E, 0x97E479BE, 0xAA84500E, - 0x4834505D, 0x755479ED, 0x32F4033D, 0x0F942A8D, - 0xBDB4F69D, 0x80D4DF2D, 0xC774A5FD, 0xFA148C4D, - 0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C, - 0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C, - /* T8_5 */ - 0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE, - 0x9B914216, 0x50CD91B3, 0xD659E31D, 0x1D0530B8, - 0xEC53826D, 0x270F51C8, 0xA19B2366, 0x6AC7F0C3, - 0x77C2C07B, 0xBC9E13DE, 0x3A0A6170, 0xF156B2D5, - 0x03D6029B, 0xC88AD13E, 0x4E1EA390, 0x85427035, - 0x9847408D, 0x531B9328, 0xD58FE186, 0x1ED33223, - 0xEF8580F6, 0x24D95353, 0xA24D21FD, 0x6911F258, - 0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E, - 0x07AC0536, 0xCCF0D693, 0x4A64A43D, 0x81387798, - 0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E, - 0xEBFF875B, 0x20A354FE, 0xA6372650, 0x6D6BF5F5, - 0x706EC54D, 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3, - 0x047A07AD, 0xCF26D408, 0x49B2A6A6, 0x82EE7503, - 0x9FEB45BB, 0x54B7961E, 0xD223E4B0, 0x197F3715, - 0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E, - 0x73B8C7D6, 0xB8E41473, 0x3E7066DD, 0xF52CB578, - 0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2, - 0x94C9487A, 0x5F959BDF, 0xD901E971, 0x125D3AD4, - 0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF, - 0x789ACA17, 0xB3C619B2, 0x35526B1C, 0xFE0EB8B9, - 0x0C8E08F7, 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59, - 0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F, - 0xE0DD8A9A, 0x2B81593F, 0xAD152B91, 0x6649F834, - 0x7B4CC88C, 0xB0101B29, 0x36846987, 0xFDD8BA22, - 0x08F40F5A, 0xC3A8DCFF, 0x453CAE51, 0x8E607DF4, - 0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2, - 0xE4A78D37, 0x2FFB5E92, 0xA96F2C3C, 0x6233FF99, - 0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F, - 0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F, - 0x90B34FD7, 0x5BEF9C72, 0xDD7BEEDC, 0x16273D79, - 0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02, - 0x7CE0CDBA, 0xB7BC1E1F, 0x31286CB1, 0xFA74BF14, - 0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 
0x98246676, - 0x852156CE, 0x4E7D856B, 0xC8E9F7C5, 0x03B52460, - 0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B, - 0x6972D4A3, 0xA22E0706, 0x24BA75A8, 0xEFE6A60D, - 0x1D661643, 0xD63AC5E6, 0x50AEB748, 0x9BF264ED, - 0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB, - 0xF135942E, 0x3A69478B, 0xBCFD3525, 0x77A1E680, - 0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496, - 0x191C11EE, 0xD240C24B, 0x54D4B0E5, 0x9F886340, - 0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156, - 0xF54F9383, 0x3E134026, 0xB8873288, 0x73DBE12D, - 0x6EDED195, 0xA5820230, 0x2316709E, 0xE84AA33B, - 0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB, - 0x815B5163, 0x4A0782C6, 0xCC93F068, 0x07CF23CD, - 0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6, - 0x6D08D30E, 0xA65400AB, 0x20C07205, 0xEB9CA1A0, - 0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A, - 0x8A795CA2, 0x41258F07, 0xC7B1FDA9, 0x0CED2E0C, - 0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2, 0x7B2FEE77, - 0x662ADECF, 0xAD760D6A, 0x2BE27FC4, 0xE0BEAC61, - 0x123E1C2F, 0xD962CF8A, 0x5FF6BD24, 0x94AA6E81, - 0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97, - 0xFE6D9E42, 0x35314DE7, 0xB3A53F49, 0x78F9ECEC, - 0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA, - 0x16441B82, 0xDD18C827, 0x5B8CBA89, 0x90D0692C, - 0x8DD55994, 0x46898A31, 0xC01DF89F, 0x0B412B3A, - 0xFA1799EF, 0x314B4A4A, 0xB7DF38E4, 0x7C83EB41, - 0x6186DBF9, 0xAADA085C, 0x2C4E7AF2, 0xE712A957, - 0x15921919, 0xDECECABC, 0x585AB812, 0x93066BB7, - 0x8E035B0F, 0x455F88AA, 0xC3CBFA04, 0x089729A1, - 0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA, - 0x6250D962, 0xA90C0AC7, 0x2F987869, 0xE4C4ABCC, - /* T8_6 */ - 0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D, - 0xF44F2413, 0x52382FA7, 0x63D0353A, 0xC5A73E8E, - 0x33EF4E67, 0x959845D3, 0xA4705F4E, 0x020754FA, - 0xC7A06A74, 0x61D761C0, 0x503F7B5D, 0xF64870E9, - 0x67DE9CCE, 0xC1A9977A, 0xF0418DE7, 0x56368653, - 0x9391B8DD, 0x35E6B369, 0x040EA9F4, 0xA279A240, - 0x5431D2A9, 0xF246D91D, 0xC3AEC380, 0x65D9C834, - 0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27, - 0xCFBD399C, 0x69CA3228, 0x582228B5, 0xFE552301, - 0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712, - 0xFC5277FB, 0x5A257C4F, 0x6BCD66D2, 0xCDBA6D66, - 0x081D53E8, 0xAE6A585C, 0x9F8242C1, 0x39F54975, - 0xA863A552, 0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF, - 0x5C2C8141, 0xFA5B8AF5, 0xCBB39068, 0x6DC49BDC, - 0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8, - 0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F, 0x5E2BD5BB, - 0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4, - 0xB044516A, 0x16335ADE, 0x27DB4043, 0x81AC4BF7, - 0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183, - 0x83AB1F0D, 0x25DC14B9, 0x14340E24, 0xB2430590, - 0x23D5E9B7, 0x85A2E203, 0xB44AF89E, 0x123DF32A, - 0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739, - 0x103AA7D0, 0xB64DAC64, 0x87A5B6F9, 0x21D2BD4D, - 0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E, - 0x8BB64CE5, 0x2DC14751, 0x1C295DCC, 0xBA5E5678, - 0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B, - 0xB8590282, 0x1E2E0936, 0x2FC613AB, 0x89B1181F, - 0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C, - 0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6, - 0x1827F438, 0xBE50FF8C, 0x8FB8E511, 0x29CFEEA5, - 0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1, - 0x2BC8BA5F, 0x8DBFB1EB, 0xBC57AB76, 0x1A20A0C2, - 0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F, - 0x7C59CEE1, 0xDA2EC555, 0xEBC6DFC8, 0x4DB1D47C, - 0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08, - 0x4FB68086, 0xE9C18B32, 0xD82991AF, 0x7E5E9A1B, - 0xEFC8763C, 0x49BF7D88, 0x78576715, 0xDE206CA1, - 0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2, - 0xDC27385B, 0x7A5033EF, 0x4BB82972, 0xEDCF22C6, - 0x28681C48, 0x8E1F17FC, 
0xBFF70D61, 0x198006D5, - 0x47ABD36E, 0xE1DCD8DA, 0xD034C247, 0x7643C9F3, - 0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0, - 0x74449D09, 0xD23396BD, 0xE3DB8C20, 0x45AC8794, - 0x800BB91A, 0x267CB2AE, 0x1794A833, 0xB1E3A387, - 0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D, - 0xD43A6BB3, 0x724D6007, 0x43A57A9A, 0xE5D2712E, - 0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A, - 0xE7D525D4, 0x41A22E60, 0x704A34FD, 0xD63D3F49, - 0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516, - 0x3852BB98, 0x9E25B02C, 0xAFCDAAB1, 0x09BAA105, - 0xFFF2D1EC, 0x5985DA58, 0x686DC0C5, 0xCE1ACB71, - 0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, 0x3A55EF62, - 0xABC30345, 0x0DB408F1, 0x3C5C126C, 0x9A2B19D8, - 0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB, - 0x982C4D22, 0x3E5B4696, 0x0FB35C0B, 0xA9C457BF, - 0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC, - 0x03A0A617, 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A, - 0xF7EF8204, 0x519889B0, 0x6070932D, 0xC6079899, - 0x304FE870, 0x9638E3C4, 0xA7D0F959, 0x01A7F2ED, - 0xC400CC63, 0x6277C7D7, 0x539FDD4A, 0xF5E8D6FE, - 0x647E3AD9, 0xC209316D, 0xF3E12BF0, 0x55962044, - 0x90311ECA, 0x3646157E, 0x07AE0FE3, 0xA1D90457, - 0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23, - 0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30, - /* T8_7 */ - 0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3, - 0x844A0EFA, 0x48E00E64, 0xC66F0987, 0x0AC50919, - 0xD3E51BB5, 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56, - 0x57AF154F, 0x9B0515D1, 0x158A1232, 0xD92012AC, - 0x7CBB312B, 0xB01131B5, 0x3E9E3656, 0xF23436C8, - 0xF8F13FD1, 0x345B3F4F, 0xBAD438AC, 0x767E3832, - 0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, 0x21D12D7D, - 0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387, - 0xF9766256, 0x35DC62C8, 0xBB53652B, 0x77F965B5, - 0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F, - 0x2A9379E3, 0xE639797D, 0x68B67E9E, 0xA41C7E00, - 0xAED97719, 0x62737787, 0xECFC7064, 0x205670FA, - 0x85CD537D, 0x496753E3, 0xC7E85400, 0x0B42549E, - 0x01875D87, 0xCD2D5D19, 0x43A25AFA, 0x8F085A64, - 0x562848C8, 0x9A824856, 0x140D4FB5, 0xD8A74F2B, - 0xD2624632, 0x1EC846AC, 0x9047414F, 0x5CED41D1, - 0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E, - 0xADD7CC17, 0x617DCC89, 0xEFF2CB6A, 0x2358CBF4, - 0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB, - 0x7E32D7A2, 0xB298D73C, 0x3C17D0DF, 0xF0BDD041, - 0x5526F3C6, 0x998CF358, 0x1703F4BB, 0xDBA9F425, - 0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF, - 0x86C3E873, 0x4A69E8ED, 0xC4E6EF0E, 0x084CEF90, - 0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A, - 0xD0EBA0BB, 0x1C41A025, 0x92CEA7C6, 0x5E64A758, - 0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2, - 0x030EBB0E, 0xCFA4BB90, 0x412BBC73, 0x8D81BCED, - 0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217, - 0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673, - 0x281A9F6A, 0xE4B09FF4, 0x6A3F9817, 0xA6959889, - 0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6, - 0xFBFF84DF, 0x37558441, 0xB9DA83A2, 0x7570833C, - 0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239, - 0xD7718B20, 0x1BDB8BBE, 0x95548C5D, 0x59FE8CC3, - 0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C, - 0x04949095, 0xC83E900B, 0x46B197E8, 0x8A1B9776, - 0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C, 0xA10FB312, - 0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8, - 0xFC65AF44, 0x30CFAFDA, 0xBE40A839, 0x72EAA8A7, - 0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D, - 0xAA4DE78C, 0x66E7E712, 0xE868E0F1, 0x24C2E06F, - 0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95, - 0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, 0xF727FBDA, - 0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE, 0x736DF520, - 0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144, - 0x52BCD85D, 0x9E16D8C3, 0x1099DF20, 0xDC33DFBE, - 0x0513CD12, 
0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1, - 0x8159C3E8, 0x4DF3C376, 0xC37CC495, 0x0FD6C40B, - 0x7AA64737, 0xB60C47A9, 0x3883404A, 0xF42940D4, - 0xFEEC49CD, 0x32464953, 0xBCC94EB0, 0x70634E2E, - 0xA9435C82, 0x65E95C1C, 0xEB665BFF, 0x27CC5B61, - 0x2D095278, 0xE1A352E6, 0x6F2C5505, 0xA386559B, - 0x061D761C, 0xCAB77682, 0x44387161, 0x889271FF, - 0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05, - 0xD5F86DA9, 0x19526D37, 0x97DD6AD4, 0x5B776A4A, - 0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0, - 0x83D02561, 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282, - 0x079A2B9B, 0xCB302B05, 0x45BF2CE6, 0x89152C78, - 0x50353ED4, 0x9C9F3E4A, 0x121039A9, 0xDEBA3937, - 0xD47F302E, 0x18D530B0, 0x965A3753, 0x5AF037CD, - 0xFF6B144A, 0x33C114D4, 0xBD4E1337, 0x71E413A9, - 0x7B211AB0, 0xB78B1A2E, 0x39041DCD, 0xF5AE1D53, - 0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C, - 0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6 - }; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java deleted file mode 100644 index 1c443575f8179..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java +++ /dev/null @@ -1,559 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Some portions of this file Copyright (c) 2004-2006 Intel Corportation - * and licensed under the BSD license. - */ -package org.apache.hadoop.ozone.common; - -/** - * Similar to {@link org.apache.hadoop.util.PureJavaCrc32C} - * except that this class implement {@link ChecksumByteBuffer}. - */ -final class PureJavaCrc32CByteBuffer extends ChecksumByteBuffer.CrcIntTable { - @Override - int[] getTable() { - return T; - } - - /** - * CRC-32C lookup table generated by the polynomial 0x82F63B78. - * See also org.apache.hadoop.util.TestPureJavaCrc32.Table. 
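For context on the constants being removed here: the class only stores eight precomputed slicing-by-8 tables and never generates them at runtime. Below is a minimal, hedged sketch of how the first (single-byte) slice of such a CRC-32C table can be derived from the reflected polynomial 0x82F63B78; the class and method names are hypothetical and not part of this patch.

```java
// Illustrative sketch only, not part of this patch: deriving the first
// (single-byte) slice of a CRC-32C lookup table from the reflected
// polynomial 0x82F63B78. The removed class ships all eight slices precomputed.
final class Crc32cTableSketch {          // hypothetical helper, for illustration
  static int[] makeTable() {
    int[] table = new int[256];
    for (int i = 0; i < 256; i++) {
      int crc = i;
      for (int bit = 0; bit < 8; bit++) {
        // Reflected CRC step: shift right, folding in the polynomial when the
        // pre-shift low bit was set.
        crc = (crc >>> 1) ^ ((crc & 1) != 0 ? 0x82F63B78 : 0);
      }
      table[i] = crc;   // e.g. table[1] == 0xF26B8303, matching T8_0 in the table that follows
    }
    return table;
  }
}
```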
- */ - private static final int[] T = { - /* T8_0 */ - 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, - 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, - 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, - 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, - 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, - 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, - 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, - 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, - 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, - 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, - 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, - 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, - 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, - 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, - 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, - 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, - 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, - 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, - 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, - 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, - 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, - 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, - 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, - 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, - 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, - 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, - 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, - 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, - 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, - 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, - 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, - 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, - 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, - 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, - 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, - 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, - 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, - 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, - 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, - 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, - 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, - 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, - 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, - 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, - 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, - 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, - 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, - 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, - 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, - 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, - 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, - 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, - 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, - 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, - 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, - 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, - 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, - 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, - 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, - 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, - 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, - 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, - 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, - 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351, - /* T8_1 */ - 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, - 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945, - 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, - 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, - 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, - 0x714F905D, 0x62ED082A, 0x560AA0B3, 
0x45A838C4, - 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, - 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C, - 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, - 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, - 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, - 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF, - 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, - 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6, - 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, - 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, - 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, - 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41, - 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, - 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9, - 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, - 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, - 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, - 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78, - 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, - 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43, - 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, - 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, - 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, - 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2, - 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, - 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A, - 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, - 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, - 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, - 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004, - 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, - 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D, - 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, - 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, - 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, - 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE, - 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, - 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306, - 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, - 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, - 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, - 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287, - 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, - 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8, - 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, - 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, - 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, - 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439, - 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, - 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781, - 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, - 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, - 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, - 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502, - 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, - 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B, - 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, - 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483, - /* T8_2 */ - 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, - 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469, - 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, - 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, - 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, - 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3, - 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, - 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726, - 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, - 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, - 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, - 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8, - 0x91E6869E, 0x34A714E0, 
0xDE89D493, 0x7BC846ED, - 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7, - 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, - 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, - 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, - 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0, - 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, - 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75, - 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, - 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, - 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, - 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF, - 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, - 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4, - 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, - 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, - 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, - 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E, - 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, - 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB, - 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, - 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, - 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, - 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF, - 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, - 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0, - 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, - 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, - 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, - 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E, - 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, - 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB, - 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, - 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, - 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, - 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71, - 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, - 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3, - 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, - 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, - 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, - 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79, - 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, - 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC, - 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, - 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, - 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, - 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622, - 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, - 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D, - 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, - 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8, - /* T8_3 */ - 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, - 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA, - 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, - 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, - 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, - 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7, - 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, - 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11, - 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, - 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, - 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, - 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7, - 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, - 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C, - 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, - 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, - 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, - 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D, - 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, - 0x26C00DF2, 
0xFB85A74A, 0x99A72E73, 0x44E284CB, - 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, - 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, - 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, - 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6, - 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, - 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6, - 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, - 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, - 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, - 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B, - 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, - 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D, - 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, - 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, - 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, - 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213, - 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, - 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8, - 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, - 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, - 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, - 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E, - 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, - 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698, - 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, - 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, - 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, - 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5, - 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, - 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12, - 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, - 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, - 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, - 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F, - 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, - 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9, - 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, - 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, - 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, - 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F, - 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, - 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4, - 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, - 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842, - /* T8_4 */ - 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, - 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44, - 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, - 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, - 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, - 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97, - 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, - 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406, - 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, - 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, - 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, - 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082, - 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, - 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0, - 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, - 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, - 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, - 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA, - 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, - 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B, - 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, - 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, - 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, - 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8, - 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, - 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD, - 
0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, - 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, - 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, - 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E, - 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, - 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF, - 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, - 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, - 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, - 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089, - 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, - 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB, - 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, - 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, - 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, - 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F, - 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, - 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE, - 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, - 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, - 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, - 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D, - 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, - 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6, - 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, - 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, - 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, - 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065, - 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, - 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4, - 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, - 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, - 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, - 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70, - 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, - 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532, - 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, - 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3, - /* T8_5 */ - 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, - 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD, - 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, - 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, - 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, - 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93, - 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, - 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C, - 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, - 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, - 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, - 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F, - 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, - 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E, - 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, - 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, - 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, - 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746, - 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, - 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59, - 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, - 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, - 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, - 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67, - 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, - 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB, - 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, - 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4, - 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, - 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5, - 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, - 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA, - 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 
0x28B47C0C, - 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, - 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, - 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364, - 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, - 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45, - 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, - 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, - 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, - 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6, - 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, - 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9, - 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, - 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, - 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, - 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7, - 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, - 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090, - 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, - 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, - 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, - 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE, - 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, - 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1, - 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, - 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D, - 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, - 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02, - 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, - 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623, - 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, - 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C, - /* T8_6 */ - 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, - 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089, - 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, - 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, - 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, - 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F, - 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, - 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C, - 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, - 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, - 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, - 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67, - 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, - 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992, - 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, - 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1, - 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, - 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3, - 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, - 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0, - 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, - 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, - 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, - 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006, - 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, - 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E, - 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, - 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D, - 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, - 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8, - 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, - 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB, - 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, - 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, - 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, - 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E, - 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, - 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB, - 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, - 0x82F8CCD0, 0xEAFBE018, 
0x52FE9540, 0x3AFDB988, - 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, - 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0, - 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, - 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093, - 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, - 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, - 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, - 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35, - 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, - 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907, - 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, - 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, - 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, - 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1, - 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, - 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2, - 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, - 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, - 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, - 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9, - 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, - 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C, - 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, - 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F, - /* T8_7 */ - 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, - 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504, - 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, - 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, - 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, - 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0, - 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, - 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A, - 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, - 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, - 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, - 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447, - 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, - 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929, - 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, - 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, - 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, - 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36, - 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, - 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC, - 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, - 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, - 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, - 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358, - 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, - 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF, - 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, - 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75, - 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, - 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B, - 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, - 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1, - 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, - 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, - 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, - 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA, - 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, - 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4, - 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, - 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E, - 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, - 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9, - 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, - 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223, - 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, - 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, - 0xFF4CFE93, 
0xB67083B4, 0x6D3404DD, 0x240879FA, - 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97, - 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, - 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852, - 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, - 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88, - 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, - 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6, - 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, - 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C, - 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, - 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, - 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, - 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911, - 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, - 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F, - 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, - 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5 - }; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java deleted file mode 100644 index 7992dad78db5a..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java +++ /dev/null @@ -1,261 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.file.DirectoryStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Properties; - -/** - * Storage information file. This Class defines the methods to check - * the consistency of the storage dir and the version file. - *

- * Local storage information is stored in a separate file VERSION. - * It contains type of the node, - * the storage layout version, the SCM id, and - * the OM/SCM state creation time. - * - */ -@InterfaceAudience.Private -public abstract class Storage { - private static final Logger LOG = LoggerFactory.getLogger(Storage.class); - - public static final String STORAGE_DIR_CURRENT = "current"; - protected static final String STORAGE_FILE_VERSION = "VERSION"; - public static final String CONTAINER_DIR = "containerDir"; - - private final NodeType nodeType; - private final File root; - private final File storageDir; - - private StorageState state; - private StorageInfo storageInfo; - - - /** - * Determines the state of the Version file. - */ - public enum StorageState { - NON_EXISTENT, NOT_INITIALIZED, INITIALIZED - } - - public Storage(NodeType type, File root, String sdName) - throws IOException { - this.nodeType = type; - this.root = root; - this.storageDir = new File(root, sdName); - this.state = getStorageState(); - if (state == StorageState.INITIALIZED) { - this.storageInfo = new StorageInfo(type, getVersionFile()); - } else { - this.storageInfo = new StorageInfo( - nodeType, StorageInfo.newClusterID(), Time.now()); - setNodeProperties(); - } - } - - /** - * Gets the path of the Storage dir. - * @return Storage dir path - */ - public String getStorageDir() { - return storageDir.getAbsoluteFile().toString(); - } - - /** - * Gets the state of the version file. - * @return the state of the Version file - */ - public StorageState getState() { - return state; - } - - public NodeType getNodeType() { - return storageInfo.getNodeType(); - } - - public String getClusterID() { - return storageInfo.getClusterID(); - } - - public long getCreationTime() { - return storageInfo.getCreationTime(); - } - - public void setClusterId(String clusterId) throws IOException { - if (state == StorageState.INITIALIZED) { - throw new IOException( - "Storage directory " + storageDir + " already initialized."); - } else { - storageInfo.setClusterId(clusterId); - } - } - - /** - * Retrieves the storageInfo instance to read/write the common - * version file properties. - * @return the instance of the storageInfo class - */ - protected StorageInfo getStorageInfo() { - return storageInfo; - } - - abstract protected Properties getNodeProperties(); - - /** - * Sets the Node properties specific to OM/SCM. - */ - private void setNodeProperties() { - Properties nodeProperties = getNodeProperties(); - if (nodeProperties != null) { - for (String key : nodeProperties.stringPropertyNames()) { - storageInfo.setProperty(key, nodeProperties.getProperty(key)); - } - } - } - - /** - * Directory {@code current} contains latest files defining - * the file system meta-data. - * - * @return the directory path - */ - public File getCurrentDir() { - return new File(storageDir, STORAGE_DIR_CURRENT); - } - - /** - * File {@code VERSION} contains the following fields: - *

- *   1. node type
- *   2. OM/SCM state creation time
- *   3. other fields specific for this node type
- * The version file is always written last during storage directory updates. - * The existence of the version file indicates that all other files have - * been successfully written in the storage directory, the storage is valid - * and does not need to be recovered. - * - * @return the version file path - */ - private File getVersionFile() { - return new File(getCurrentDir(), STORAGE_FILE_VERSION); - } - - - /** - * Check to see if current/ directory is empty. This method is used - * before determining to format the directory. - * @throws IOException if unable to list files under the directory. - */ - private void checkEmptyCurrent() throws IOException { - File currentDir = getCurrentDir(); - if (!currentDir.exists()) { - // if current/ does not exist, it's safe to format it. - return; - } - try (DirectoryStream dirStream = Files - .newDirectoryStream(currentDir.toPath())) { - if (dirStream.iterator().hasNext()) { - throw new InconsistentStorageStateException(getCurrentDir(), - "Can't initialize the storage directory because the current " - + "it is not empty."); - } - } - } - - /** - * Check consistency of the storage directory. - * - * @return state {@link StorageState} of the storage directory - * @throws IOException - */ - private StorageState getStorageState() throws IOException { - assert root != null : "root is null"; - String rootPath = root.getCanonicalPath(); - try { // check that storage exists - if (!root.exists()) { - // storage directory does not exist - LOG.warn("Storage directory " + rootPath + " does not exist"); - return StorageState.NON_EXISTENT; - } - // or is inaccessible - if (!root.isDirectory()) { - LOG.warn(rootPath + "is not a directory"); - return StorageState.NON_EXISTENT; - } - if (!FileUtil.canWrite(root)) { - LOG.warn("Cannot access storage directory " + rootPath); - return StorageState.NON_EXISTENT; - } - } catch (SecurityException ex) { - LOG.warn("Cannot access storage directory " + rootPath, ex); - return StorageState.NON_EXISTENT; - } - - // check whether current directory is valid - File versionFile = getVersionFile(); - boolean hasCurrent = versionFile.exists(); - - if (hasCurrent) { - return StorageState.INITIALIZED; - } else { - checkEmptyCurrent(); - return StorageState.NOT_INITIALIZED; - } - } - - /** - * Creates the Version file if not present, - * otherwise returns with IOException. - * @throws IOException - */ - public void initialize() throws IOException { - if (state == StorageState.INITIALIZED) { - throw new IOException("Storage directory already initialized."); - } - if (!getCurrentDir().mkdirs()) { - throw new IOException("Cannot create directory " + getCurrentDir()); - } - storageInfo.writeTo(getVersionFile()); - } - - /** - * Persists current StorageInfo to file system.. - * @throws IOException - */ - public void persistCurrentState() throws IOException { - if (!getCurrentDir().exists()) { - throw new IOException("Metadata dir doesn't exist, dir: " + - getCurrentDir()); - } - storageInfo.writeTo(getVersionFile()); - } - -} - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java deleted file mode 100644 index ad26f77e3ac9e..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
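For context on the Storage class deleted above: its VERSION file is an ordinary java.util.Properties file kept under the current/ directory, with keys defined by the StorageInfo class deleted just below (nodeType, clusterID, cTime). The following is a hedged sketch of what producing such a file looks like; the wrapping class, the path, and the concrete values are made up for illustration.

```java
// Hedged sketch, not part of this patch: the VERSION file persisted by
// Storage#initialize() is a plain java.util.Properties file. The keys
// (nodeType, clusterID, cTime) come from the removed StorageInfo class;
// the path and values here are hypothetical.
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Properties;
import java.util.UUID;

final class VersionFileSketch {
  public static void main(String[] args) throws IOException {
    Properties version = new Properties();
    version.setProperty("nodeType", "SCM");                        // an HddsProtos.NodeType name
    version.setProperty("clusterID", "CID-" + UUID.randomUUID());  // same shape as newClusterID()
    version.setProperty("cTime", String.valueOf(System.currentTimeMillis()));
    try (FileOutputStream out = new FileOutputStream("scm/current/VERSION")) {
      // The version file is written last, so its presence marks the
      // storage directory as initialized.
      version.store(out, null);
    }
  }
}
```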
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.util.Properties; -import java.util.UUID; - -/** - * Common class for storage information. This class defines the common - * properties and functions to set them , write them into the version file - * and read them from the version file. - * - */ -@InterfaceAudience.Private -public class StorageInfo { - - private Properties properties = new Properties(); - - /** - * Property to hold node type. - */ - private static final String NODE_TYPE = "nodeType"; - /** - * Property to hold ID of the cluster. - */ - private static final String CLUSTER_ID = "clusterID"; - /** - * Property to hold creation time of the storage. - */ - private static final String CREATION_TIME = "cTime"; - - /** - * Constructs StorageInfo instance. - * @param type - * Type of the node using the storage - * @param cid - * Cluster ID - * @param cT - * Cluster creation Time - - * @throws IOException - on Error. 
- */ - public StorageInfo(NodeType type, String cid, long cT) - throws IOException { - Preconditions.checkNotNull(type); - Preconditions.checkNotNull(cid); - properties.setProperty(NODE_TYPE, type.name()); - properties.setProperty(CLUSTER_ID, cid); - properties.setProperty(CREATION_TIME, String.valueOf(cT)); - } - - public StorageInfo(NodeType type, File propertiesFile) - throws IOException { - this.properties = readFrom(propertiesFile); - verifyNodeType(type); - verifyClusterId(); - verifyCreationTime(); - } - - public NodeType getNodeType() { - return NodeType.valueOf(properties.getProperty(NODE_TYPE)); - } - - public String getClusterID() { - return properties.getProperty(CLUSTER_ID); - } - - public Long getCreationTime() { - String creationTime = properties.getProperty(CREATION_TIME); - if(creationTime != null) { - return Long.parseLong(creationTime); - } - return null; - } - - public String getProperty(String key) { - return properties.getProperty(key); - } - - public void setProperty(String key, String value) { - properties.setProperty(key, value); - } - - public void setClusterId(String clusterId) { - properties.setProperty(CLUSTER_ID, clusterId); - } - - private void verifyNodeType(NodeType type) - throws InconsistentStorageStateException { - NodeType nodeType = getNodeType(); - Preconditions.checkNotNull(nodeType); - if(type != nodeType) { - throw new InconsistentStorageStateException("Expected NodeType: " + type + - ", but found: " + nodeType); - } - } - - private void verifyClusterId() - throws InconsistentStorageStateException { - String clusterId = getClusterID(); - Preconditions.checkNotNull(clusterId); - if(clusterId.isEmpty()) { - throw new InconsistentStorageStateException("Cluster ID not found"); - } - } - - private void verifyCreationTime() { - Long creationTime = getCreationTime(); - Preconditions.checkNotNull(creationTime); - } - - - public void writeTo(File to) - throws IOException { - try (RandomAccessFile file = new RandomAccessFile(to, "rws"); - FileOutputStream out = new FileOutputStream(file.getFD())) { - file.seek(0); - /* - * If server is interrupted before this line, - * the version file will remain unchanged. - */ - properties.store(out, null); - /* - * Now the new fields are flushed to the head of the file, but file - * length can still be larger then required and therefore the file can - * contain whole or corrupted fields from its old contents in the end. - * If server is interrupted here and restarted later these extra fields - * either should not effect server behavior or should be handled - * by the server correctly. - */ - file.setLength(out.getChannel().position()); - } - } - - private Properties readFrom(File from) throws IOException { - try (RandomAccessFile file = new RandomAccessFile(from, "rws"); - FileInputStream in = new FileInputStream(file.getFD())) { - Properties props = new Properties(); - file.seek(0); - props.load(in); - return props; - } - } - - /** - * Generate new clusterID. - * - * clusterID is a persistent attribute of the cluster. - * It is generated when the cluster is created and remains the same - * during the life cycle of the cluster. When a new SCM node is initialized, - * if this is a new cluster, a new clusterID is generated and stored. 
- * @return new clusterID - */ - public static String newClusterID() { - return "CID-" + UUID.randomUUID().toString(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java deleted file mode 100644 index 6517e5897eac6..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java deleted file mode 100644 index 9aeff248381e9..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.common.statemachine; - -/** - * Class wraps invalid state transition exception. 
- */ -public class InvalidStateTransitionException extends Exception { - private Enum currentState; - private Enum event; - - public InvalidStateTransitionException(Enum currentState, Enum event) { - super("Invalid event: " + event + " at " + currentState + " state."); - this.currentState = currentState; - this.event = event; - } - - public Enum getCurrentState() { - return currentState; - } - - public Enum getEvent() { - return event; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java deleted file mode 100644 index bf8cbd596edfb..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.common.statemachine; - -import com.google.common.base.Supplier; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - -/** - * Template class that wraps simple event driven state machine. - * @param states allowed - * @param events allowed - */ -public class StateMachine, EVENT extends Enum> { - private STATE initialState; - private Set finalStates; - - private final LoadingCache> transitions = - CacheBuilder.newBuilder().build( - CacheLoader.from((Supplier>) () -> new HashMap())); - - public StateMachine(STATE initState, Set finalStates) { - this.initialState = initState; - this.finalStates = finalStates; - } - - public STATE getInitialState() { - return initialState; - } - - public Set getFinalStates() { - return finalStates; - } - - public STATE getNextState(STATE from, EVENT e) - throws InvalidStateTransitionException { - STATE target = transitions.getUnchecked(e).get(from); - if (target == null) { - throw new InvalidStateTransitionException(from, e); - } - return target; - } - - public void addTransition(STATE from, STATE to, EVENT e) { - transitions.getUnchecked(e).put(from, to); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java deleted file mode 100644 index 045409e3ed2ea..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
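For context on the StateMachine template deleted above: it is driven entirely through addTransition and getNextState, with unmapped (state, event) pairs raising InvalidStateTransitionException. A hedged usage sketch follows; the LifeCycle and Event enums are hypothetical, and the generic bounds are assumed from the removed constructor and method signatures.

```java
// Hedged usage sketch, not part of this patch: driving the StateMachine
// template removed above. LifeCycle and Event are hypothetical enums.
import java.util.EnumSet;

import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
import org.apache.hadoop.ozone.common.statemachine.StateMachine;

final class StateMachineSketch {
  enum LifeCycle { OPEN, CLOSING, CLOSED }
  enum Event { FINALIZE, CLOSE }

  static LifeCycle demo() throws InvalidStateTransitionException {
    StateMachine<LifeCycle, Event> sm =
        new StateMachine<>(LifeCycle.OPEN, EnumSet.of(LifeCycle.CLOSED));
    sm.addTransition(LifeCycle.OPEN, LifeCycle.CLOSING, Event.FINALIZE);
    sm.addTransition(LifeCycle.CLOSING, LifeCycle.CLOSED, Event.CLOSE);
    // An event with no registered transition from the current state throws
    // InvalidStateTransitionException.
    return sm.getNextState(LifeCycle.OPEN, Event.FINALIZE);  // -> CLOSING
  }
}
```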
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common.statemachine; -/** - state machine template class for ozone. - **/ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java deleted file mode 100644 index e0cac8beded9b..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.client.BlockID; -import com.google.common.base.Preconditions; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.ArrayList; - -/** - * Helper class to convert Protobuf to Java classes. - */ -public class BlockData { - private final BlockID blockID; - private final Map metadata; - - /** - * Represent a list of chunks. - * In order to reduce memory usage, chunkList is declared as an - * {@link Object}. - * When #elements == 0, chunkList is null. - * When #elements == 1, chunkList refers to the only element. - * When #elements > 1, chunkList refers to the list. - * - * Please note : when we are working with blocks, we don't care what they - * point to. So we We don't read chunkinfo nor validate them. It is - * responsibility of higher layer like ozone. We just read and write data - * from network. - */ - private Object chunkList; - - /** - * total size of the key. - */ - private long size; - - /** - * Constructs a BlockData Object. 
- * - * @param blockID - */ - public BlockData(BlockID blockID) { - this.blockID = blockID; - this.metadata = new TreeMap<>(); - this.size = 0; - } - - public long getBlockCommitSequenceId() { - return blockID.getBlockCommitSequenceId(); - } - - public void setBlockCommitSequenceId(long blockCommitSequenceId) { - this.blockID.setBlockCommitSequenceId(blockCommitSequenceId); - } - - /** - * Returns a blockData object from the protobuf data. - * - * @param data - Protobuf data. - * @return - BlockData - * @throws IOException - */ - public static BlockData getFromProtoBuf(ContainerProtos.BlockData data) throws - IOException { - BlockData blockData = new BlockData( - BlockID.getFromProtobuf(data.getBlockID())); - for (int x = 0; x < data.getMetadataCount(); x++) { - blockData.addMetadata(data.getMetadata(x).getKey(), - data.getMetadata(x).getValue()); - } - blockData.setChunks(data.getChunksList()); - if (data.hasSize()) { - Preconditions.checkArgument(data.getSize() == blockData.getSize()); - } - return blockData; - } - - /** - * Returns a Protobuf message from BlockData. - * @return Proto Buf Message. - */ - public ContainerProtos.BlockData getProtoBufMessage() { - ContainerProtos.BlockData.Builder builder = - ContainerProtos.BlockData.newBuilder(); - builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf()); - for (Map.Entry entry : metadata.entrySet()) { - ContainerProtos.KeyValue.Builder keyValBuilder = - ContainerProtos.KeyValue.newBuilder(); - builder.addMetadata(keyValBuilder.setKey(entry.getKey()) - .setValue(entry.getValue()).build()); - } - builder.addAllChunks(getChunks()); - builder.setSize(size); - return builder.build(); - } - - /** - * Adds metadata. - * - * @param key - Key - * @param value - Value - * @throws IOException - */ - public synchronized void addMetadata(String key, String value) throws - IOException { - if (this.metadata.containsKey(key)) { - throw new IOException("This key already exists. Key " + key); - } - metadata.put(key, value); - } - - public synchronized Map getMetadata() { - return Collections.unmodifiableMap(this.metadata); - } - - /** - * Returns value of a key. - */ - public synchronized String getValue(String key) { - return metadata.get(key); - } - - /** - * Deletes a metadata entry from the map. - * - * @param key - Key - */ - public synchronized void deleteKey(String key) { - metadata.remove(key); - } - - @SuppressWarnings("unchecked") - private List castChunkList() { - return (List)chunkList; - } - - /** - * Returns chunks list. - * - * @return list of chunkinfo. - */ - public List getChunks() { - return chunkList == null? Collections.emptyList() - : chunkList instanceof ContainerProtos.ChunkInfo? - Collections.singletonList((ContainerProtos.ChunkInfo)chunkList) - : Collections.unmodifiableList(castChunkList()); - } - - /** - * Adds chinkInfo to the list. - */ - public void addChunk(ContainerProtos.ChunkInfo chunkInfo) { - if (chunkList == null) { - chunkList = chunkInfo; - } else { - final List list; - if (chunkList instanceof ContainerProtos.ChunkInfo) { - list = new ArrayList<>(2); - list.add((ContainerProtos.ChunkInfo)chunkList); - chunkList = list; - } else { - list = castChunkList(); - } - list.add(chunkInfo); - } - size += chunkInfo.getLen(); - } - - /** - * removes the chunk. 
- */ - public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) { - final boolean removed; - if (chunkList instanceof List) { - final List list = castChunkList(); - removed = list.remove(chunkInfo); - if (list.size() == 1) { - chunkList = list.get(0); - } - } else if (chunkInfo.equals(chunkList)) { - chunkList = null; - removed = true; - } else { - removed = false; - } - - if (removed) { - size -= chunkInfo.getLen(); - } - return removed; - } - - /** - * Returns container ID. - * - * @return long. - */ - public long getContainerID() { - return blockID.getContainerID(); - } - - /** - * Returns LocalID. - * @return long. - */ - public long getLocalID() { - return blockID.getLocalID(); - } - - /** - * Return Block ID. - * @return BlockID. - */ - public BlockID getBlockID() { - return blockID; - } - - /** - * Sets Chunk list. - * - * @param chunks - List of chunks. - */ - public void setChunks(List chunks) { - if (chunks == null) { - chunkList = null; - size = 0L; - } else { - final int n = chunks.size(); - chunkList = n == 0? null: n == 1? chunks.get(0): chunks; - size = chunks.parallelStream().mapToLong( - ContainerProtos.ChunkInfo::getLen).sum(); - } - } - - /** - * Get the total size of chunks allocated for the key. - * @return total size of the key. - */ - public long getSize() { - return size; - } - - @Override - public String toString() { - return new ToStringBuilder(this, ToStringStyle.NO_CLASS_NAME_STYLE) - .append("blockId", blockID.toString()) - .append("size", this.size) - .toString(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java deleted file mode 100644 index 1c73a316e5db6..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -import java.io.IOException; -import java.util.Map; -import java.util.TreeMap; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; - -/** - * Java class that represents ChunkInfo ProtoBuf class. This helper class allows - * us to convert to and from protobuf to normal java. - */ -public class ChunkInfo { - private final String chunkName; - private final long offset; - private final long len; - private ChecksumData checksumData; - private final Map metadata; - - - /** - * Constructs a ChunkInfo. - * - * @param chunkName - File Name where chunk lives. 
- * @param offset - offset where Chunk Starts. - * @param len - Length of the Chunk. - */ - public ChunkInfo(String chunkName, long offset, long len) { - this.chunkName = chunkName; - this.offset = offset; - this.len = len; - this.metadata = new TreeMap<>(); - } - - /** - * Adds metadata. - * - * @param key - Key Name. - * @param value - Value. - * @throws IOException - */ - public void addMetadata(String key, String value) throws IOException { - synchronized (this.metadata) { - if (this.metadata.containsKey(key)) { - throw new IOException("This key already exists. Key " + key); - } - metadata.put(key, value); - } - } - - /** - * Gets a Chunkinfo class from the protobuf definitions. - * - * @param info - Protobuf class - * @return ChunkInfo - * @throws IOException - */ - public static ChunkInfo getFromProtoBuf(ContainerProtos.ChunkInfo info) - throws IOException { - Preconditions.checkNotNull(info); - - ChunkInfo chunkInfo = new ChunkInfo(info.getChunkName(), info.getOffset(), - info.getLen()); - - for (int x = 0; x < info.getMetadataCount(); x++) { - chunkInfo.addMetadata(info.getMetadata(x).getKey(), - info.getMetadata(x).getValue()); - } - - chunkInfo.setChecksumData( - ChecksumData.getFromProtoBuf(info.getChecksumData())); - - return chunkInfo; - } - - /** - * Returns a ProtoBuf Message from ChunkInfo. - * - * @return Protocol Buffer Message - */ - public ContainerProtos.ChunkInfo getProtoBufMessage() { - ContainerProtos.ChunkInfo.Builder builder = ContainerProtos - .ChunkInfo.newBuilder(); - - builder.setChunkName(this.getChunkName()); - builder.setOffset(this.getOffset()); - builder.setLen(this.getLen()); - if (checksumData == null) { - // ChecksumData cannot be null while computing the protobufMessage. - // Set it to NONE type (equivalent to non checksum). - builder.setChecksumData(Checksum.getNoChecksumDataProto()); - } else { - builder.setChecksumData(this.checksumData.getProtoBufMessage()); - } - - for (Map.Entry entry : metadata.entrySet()) { - ContainerProtos.KeyValue.Builder keyValBuilder = - ContainerProtos.KeyValue.newBuilder(); - builder.addMetadata(keyValBuilder.setKey(entry.getKey()) - .setValue(entry.getValue()).build()); - } - - return builder.build(); - } - - /** - * Returns the chunkName. - * - * @return - String - */ - public String getChunkName() { - return chunkName; - } - - /** - * Gets the start offset of the given chunk in physical file. - * - * @return - long - */ - public long getOffset() { - return offset; - } - - /** - * Returns the length of the Chunk. - * - * @return long - */ - public long getLen() { - return len; - } - - /** - * Returns the checksumData of this chunk. - */ - public ChecksumData getChecksumData() { - return checksumData; - } - - /** - * Sets the checksums of this chunk. - */ - public void setChecksumData(ChecksumData cData) { - this.checksumData = cData; - } - - /** - * Returns Metadata associated with this Chunk. - * - * @return - Map of Key,values. 
- */ - public Map getMetadata() { - return metadata; - } - - @Override - public String toString() { - return "ChunkInfo{" + - "chunkName='" + chunkName + - ", offset=" + offset + - ", len=" + len + - '}'; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java deleted file mode 100644 index 11d9028f1900c..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java +++ /dev/null @@ -1,196 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.ozone.audit.DNAction; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Map; -import java.util.TreeMap; - -/** - * Utilities for converting protobuf classes to Java classes. 
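
For context, a sketch of how the two helpers defined in the class below (getAuditAction and getAuditParams) were typically consumed when assembling a datanode audit entry. The wrapper class is hypothetical and assumes the removed types are still on the classpath; note that both helpers can return null.

    import java.util.Map;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
    import org.apache.hadoop.ozone.audit.DNAction;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerCommandRequestPBHelper;

    final class AuditSketch {
      private AuditSketch() {
      }

      static String describe(ContainerCommandRequestProto msg) {
        DNAction action = ContainerCommandRequestPBHelper.getAuditAction(msg.getCmdType());
        Map<String, String> params = ContainerCommandRequestPBHelper.getAuditParams(msg);
        // Both helpers return null for unrecognized or unparsable requests.
        return action + " " + (params == null ? "{}" : params.toString());
      }
    }
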
- */ -public final class ContainerCommandRequestPBHelper { - - static final Logger LOG = - LoggerFactory.getLogger(ContainerCommandRequestPBHelper.class); - - private ContainerCommandRequestPBHelper() { - } - - public static Map getAuditParams( - ContainerCommandRequestProto msg) { - Map auditParams = new TreeMap<>(); - Type cmdType = msg.getCmdType(); - String containerID = String.valueOf(msg.getContainerID()); - switch(cmdType) { - case CreateContainer: - auditParams.put("containerID", containerID); - auditParams.put("containerType", - msg.getCreateContainer().getContainerType().toString()); - return auditParams; - - case ReadContainer: - auditParams.put("containerID", containerID); - return auditParams; - - case UpdateContainer: - auditParams.put("containerID", containerID); - auditParams.put("forceUpdate", - String.valueOf(msg.getUpdateContainer().getForceUpdate())); - return auditParams; - - case DeleteContainer: - auditParams.put("containerID", containerID); - auditParams.put("forceDelete", - String.valueOf(msg.getDeleteContainer().getForceDelete())); - return auditParams; - - case ListContainer: - auditParams.put("startContainerID", containerID); - auditParams.put("count", - String.valueOf(msg.getListContainer().getCount())); - return auditParams; - - case PutBlock: - try{ - auditParams.put("blockData", - BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData()) - .toString()); - } catch (IOException ex){ - if (LOG.isTraceEnabled()) { - LOG.trace("Encountered error parsing BlockData from protobuf: " - + ex.getMessage()); - } - return null; - } - return auditParams; - - case GetBlock: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getGetBlock().getBlockID()).toString()); - return auditParams; - - case DeleteBlock: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getDeleteBlock().getBlockID()) - .toString()); - return auditParams; - - case ListBlock: - auditParams.put("startLocalID", - String.valueOf(msg.getListBlock().getStartLocalID())); - auditParams.put("count", String.valueOf(msg.getListBlock().getCount())); - return auditParams; - - case ReadChunk: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getReadChunk().getBlockID()).toString()); - return auditParams; - - case DeleteChunk: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getDeleteChunk().getBlockID()) - .toString()); - return auditParams; - - case WriteChunk: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID()) - .toString()); - return auditParams; - - case ListChunk: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getListChunk().getBlockID()).toString()); - auditParams.put("prevChunkName", msg.getListChunk().getPrevChunkName()); - auditParams.put("count", String.valueOf(msg.getListChunk().getCount())); - return auditParams; - - case CompactChunk: return null; //CompactChunk operation - - case PutSmallFile: - try{ - auditParams.put("blockData", - BlockData.getFromProtoBuf(msg.getPutSmallFile() - .getBlock().getBlockData()).toString()); - } catch (IOException ex){ - if (LOG.isTraceEnabled()) { - LOG.trace("Encountered error parsing BlockData from protobuf: " - + ex.getMessage()); - } - } - return auditParams; - - case GetSmallFile: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getGetSmallFile().getBlock().getBlockID()) - .toString()); - return auditParams; - - case CloseContainer: - auditParams.put("containerID", containerID); - return auditParams; - - case GetCommittedBlockLength: - 
auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getGetCommittedBlockLength().getBlockID()) - .toString()); - return auditParams; - - default : - LOG.debug("Invalid command type - " + cmdType); - return null; - } - - } - - public static DNAction getAuditAction(Type cmdType) { - switch (cmdType) { - case CreateContainer : return DNAction.CREATE_CONTAINER; - case ReadContainer : return DNAction.READ_CONTAINER; - case UpdateContainer : return DNAction.UPDATE_CONTAINER; - case DeleteContainer : return DNAction.DELETE_CONTAINER; - case ListContainer : return DNAction.LIST_CONTAINER; - case PutBlock : return DNAction.PUT_BLOCK; - case GetBlock : return DNAction.GET_BLOCK; - case DeleteBlock : return DNAction.DELETE_BLOCK; - case ListBlock : return DNAction.LIST_BLOCK; - case ReadChunk : return DNAction.READ_CHUNK; - case DeleteChunk : return DNAction.DELETE_CHUNK; - case WriteChunk : return DNAction.WRITE_CHUNK; - case ListChunk : return DNAction.LIST_CHUNK; - case CompactChunk : return DNAction.COMPACT_CHUNK; - case PutSmallFile : return DNAction.PUT_SMALL_FILE; - case GetSmallFile : return DNAction.GET_SMALL_FILE; - case CloseContainer : return DNAction.CLOSE_CONTAINER; - case GetCommittedBlockLength : return DNAction.GET_COMMITTED_BLOCK_LENGTH; - default : - LOG.debug("Invalid command type - " + cmdType); - return null; - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java deleted file mode 100644 index fa5df113d879f..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -/** - * Helper classes for the container protocol communication. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java deleted file mode 100644 index dfa93156dabcc..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java +++ /dev/null @@ -1,189 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -import org.apache.hadoop.util.Time; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.Callable; - -/** - * This class represents the lease created on a resource. Callback can be - * registered on the lease which will be executed in case of timeout. - * - * @param Resource type for which the lease can be associated - */ -public class Lease { - - /** - * The resource for which this lease is created. - */ - private final T resource; - - private final long creationTime; - - /** - * Lease lifetime in milliseconds. - */ - private volatile long leaseTimeout; - - private boolean expired; - - /** - * Functions to be called in case of timeout. - */ - private List> callbacks; - - - /** - * Creates a lease on the specified resource with given timeout. - * - * @param resource - * Resource for which the lease has to be created - * @param timeout - * Lease lifetime in milliseconds - */ - public Lease(T resource, long timeout) { - this.resource = resource; - this.leaseTimeout = timeout; - this.callbacks = Collections.synchronizedList(new ArrayList<>()); - this.creationTime = Time.monotonicNow(); - this.expired = false; - } - - /** - * Returns true if the lease has expired, else false. - * - * @return true if expired, else false - */ - public boolean hasExpired() { - return expired; - } - - /** - * Registers a callback which will be executed in case of timeout. Callbacks - * are executed in a separate Thread. - * - * @param callback - * The Callable which has to be executed - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public void registerCallBack(Callable callback) - throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - callbacks.add(callback); - } - - /** - * Returns the time elapsed since the creation of lease. - * - * @return elapsed time in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public long getElapsedTime() throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - return Time.monotonicNow() - creationTime; - } - - /** - * Returns the time available before timeout. - * - * @return remaining time in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public long getRemainingTime() throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - return leaseTimeout - getElapsedTime(); - } - - /** - * Returns total lease lifetime. - * - * @return total lifetime of lease in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public long getLeaseLifeTime() throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - return leaseTimeout; - } - - /** - * Renews the lease timeout period. 
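
A minimal sketch of the Lease API removed in this file: construct (or acquire) a lease, register a timeout callback, check the remaining time, and renew it. The resource name and timeouts are hypothetical; callbacks are only actually executed when a LeaseManager's monitor expires the lease.

    import org.apache.hadoop.ozone.lease.Lease;
    import org.apache.hadoop.ozone.lease.LeaseExpiredException;

    public class LeaseSketch {
      public static void main(String[] args) throws LeaseExpiredException {
        Lease<String> lease = new Lease<>("container-1", 10_000L);

        // Cleanup to run if the lease times out.
        lease.registerCallBack(() -> {
          System.out.println("lease on container-1 timed out");
          return null;
        });

        System.out.println("remaining ms: " + lease.getRemainingTime());
        lease.renew(5_000L);  // extend the lifetime by another 5 seconds
      }
    }
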
- * - * @param timeout - * Time to be added to the lease in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public void renew(long timeout) throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - leaseTimeout += timeout; - } - - @Override - public int hashCode() { - return resource.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if(obj instanceof Lease) { - return resource.equals(((Lease) obj).resource); - } - return false; - } - - @Override - public String toString() { - return "Lease<" + resource.toString() + ">"; - } - - /** - * Returns the callbacks to be executed for the lease in case of timeout. - * - * @return callbacks to be executed - */ - List> getCallbacks() { - return callbacks; - } - - /** - * Expires/Invalidates the lease. - */ - void invalidate() { - callbacks = null; - expired = true; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java deleted file mode 100644 index a39ea22df1000..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that there is already a lease acquired on the - * same resource. - */ -public class LeaseAlreadyExistException extends LeaseException { - - /** - * Constructs an {@code LeaseAlreadyExistException} with {@code null} - * as its error detail message. - */ - public LeaseAlreadyExistException() { - super(); - } - - /** - * Constructs an {@code LeaseAlreadyExistException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseAlreadyExistException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java deleted file mode 100644 index e2ca455ef0c57..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.concurrent.Callable; - -/** - * This class is responsible for executing the callbacks of a lease in case of - * timeout. - */ -public class LeaseCallbackExecutor implements Runnable { - - private static final Logger LOG = LoggerFactory.getLogger(Lease.class); - - private final T resource; - private final List> callbacks; - - /** - * Constructs LeaseCallbackExecutor instance with list of callbacks. - * - * @param resource - * The resource for which the callbacks are executed - * @param callbacks - * Callbacks to be executed by this executor - */ - public LeaseCallbackExecutor(T resource, List> callbacks) { - this.resource = resource; - this.callbacks = callbacks; - } - - @Override - public void run() { - if (LOG.isDebugEnabled()) { - LOG.debug("Executing callbacks for lease on {}", resource); - } - for(Callable callback : callbacks) { - try { - callback.call(); - } catch (Exception e) { - LOG.warn("Exception while executing callback for lease on {}", - resource, e); - } - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java deleted file mode 100644 index 418f4127df7f2..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents all lease related exceptions. - */ -public class LeaseException extends Exception { - - /** - * Constructs an {@code LeaseException} with {@code null} - * as its error detail message. - */ - public LeaseException() { - super(); - } - - /** - * Constructs an {@code LeaseException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseException(String message) { - super(message); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java deleted file mode 100644 index 440a023beff8e..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that the lease that is being accessed has expired. - */ -public class LeaseExpiredException extends LeaseException { - - /** - * Constructs an {@code LeaseExpiredException} with {@code null} - * as its error detail message. - */ - public LeaseExpiredException() { - super(); - } - - /** - * Constructs an {@code LeaseExpiredException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseExpiredException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java deleted file mode 100644 index 02befaef9804f..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java +++ /dev/null @@ -1,251 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -/** - * LeaseManager is someone who can provide you leases based on your - * requirement. If you want to return the lease back before it expires, - * you can give it back to Lease Manager. He is the one responsible for - * the lifecycle of leases. The resource for which lease is created - * should have proper {@code equals} method implementation, resource - * equality is checked while the lease is created. - * - * @param Type of leases that this lease manager can create - */ -public class LeaseManager { - - private static final Logger LOG = - LoggerFactory.getLogger(LeaseManager.class); - - private final String name; - private final long defaultTimeout; - private Map> activeLeases; - private LeaseMonitor leaseMonitor; - private Thread leaseMonitorThread; - private boolean isRunning; - - /** - * Creates an instance of lease manager. - * - * @param name - * Name for the LeaseManager instance. - * @param defaultTimeout - * Default timeout in milliseconds to be used for lease creation. - */ - public LeaseManager(String name, long defaultTimeout) { - this.name = name; - this.defaultTimeout = defaultTimeout; - } - - /** - * Starts the lease manager service. - */ - public void start() { - LOG.debug("Starting {} LeaseManager service", name); - activeLeases = new ConcurrentHashMap<>(); - leaseMonitor = new LeaseMonitor(); - leaseMonitorThread = new Thread(leaseMonitor); - leaseMonitorThread.setName(name + "-LeaseManager#LeaseMonitor"); - leaseMonitorThread.setDaemon(true); - leaseMonitorThread.setUncaughtExceptionHandler((thread, throwable) -> { - // Let us just restart this thread after logging an error. - // if this thread is not running we cannot handle Lease expiry. - LOG.error("LeaseMonitor thread encountered an error. Thread: {}", - thread.toString(), throwable); - leaseMonitorThread.start(); - }); - LOG.debug("Starting {}-LeaseManager#LeaseMonitor Thread", name); - leaseMonitorThread.start(); - isRunning = true; - } - - /** - * Returns a lease for the specified resource with default timeout. - * - * @param resource - * Resource for which lease has to be created - * @throws LeaseAlreadyExistException - * If there is already a lease on the resource - */ - public synchronized Lease acquire(T resource) - throws LeaseAlreadyExistException { - return acquire(resource, defaultTimeout); - } - - /** - * Returns a lease for the specified resource with the timeout provided. 
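
An illustrative lifecycle for the LeaseManager removed in this file: start the monitor thread, acquire a lease on a resource, and release it (or let it expire and fire its callbacks) before shutdown. The manager name, resource key, and timeout are hypothetical.

    import org.apache.hadoop.ozone.lease.Lease;
    import org.apache.hadoop.ozone.lease.LeaseManager;

    public class LeaseManagerSketch {
      public static void main(String[] args) throws Exception {
        LeaseManager<String> manager = new LeaseManager<>("CommandWatcher", 5_000L);
        manager.start();                                    // starts the LeaseMonitor thread

        Lease<String> lease = manager.acquire("node-42");   // uses the 5 s default timeout
        lease.registerCallBack(() -> {
          System.out.println("lease on node-42 expired");
          return null;
        });

        manager.release("node-42");                         // return the lease before it expires
        manager.shutdown();
      }
    }
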
- * - * @param resource - * Resource for which lease has to be created - * @param timeout - * The timeout in milliseconds which has to be set on the lease - * @throws LeaseAlreadyExistException - * If there is already a lease on the resource - */ - public synchronized Lease acquire(T resource, long timeout) - throws LeaseAlreadyExistException { - checkStatus(); - if (LOG.isDebugEnabled()) { - LOG.debug("Acquiring lease on {} for {} milliseconds", resource, timeout); - } - if(activeLeases.containsKey(resource)) { - throw new LeaseAlreadyExistException("Resource: " + resource); - } - Lease lease = new Lease<>(resource, timeout); - activeLeases.put(resource, lease); - leaseMonitorThread.interrupt(); - return lease; - } - - /** - * Returns a lease associated with the specified resource. - * - * @param resource - * Resource for which the lease has to be returned - * @throws LeaseNotFoundException - * If there is no active lease on the resource - */ - public Lease get(T resource) throws LeaseNotFoundException { - checkStatus(); - Lease lease = activeLeases.get(resource); - if(lease != null) { - return lease; - } - throw new LeaseNotFoundException("Resource: " + resource); - } - - /** - * Releases the lease associated with the specified resource. - * - * @param resource - * The for which the lease has to be released - * @throws LeaseNotFoundException - * If there is no active lease on the resource - */ - public synchronized void release(T resource) - throws LeaseNotFoundException { - checkStatus(); - if (LOG.isDebugEnabled()) { - LOG.debug("Releasing lease on {}", resource); - } - Lease lease = activeLeases.remove(resource); - if(lease == null) { - throw new LeaseNotFoundException("Resource: " + resource); - } - lease.invalidate(); - } - - /** - * Shuts down the LeaseManager and releases the resources. All the active - * {@link Lease} will be released (callbacks on leases will not be - * executed). - */ - public void shutdown() { - checkStatus(); - LOG.debug("Shutting down LeaseManager service"); - leaseMonitor.disable(); - leaseMonitorThread.interrupt(); - for(T resource : activeLeases.keySet()) { - try { - release(resource); - } catch(LeaseNotFoundException ex) { - //Ignore the exception, someone might have released the lease - } - } - isRunning = false; - } - - /** - * Throws {@link LeaseManagerNotRunningException} if the service is not - * running. - */ - private void checkStatus() { - if(!isRunning) { - throw new LeaseManagerNotRunningException("LeaseManager not running."); - } - } - - /** - * Monitors the leases and expires them based on the timeout, also - * responsible for executing the callbacks of expired leases. - */ - private final class LeaseMonitor implements Runnable { - - private boolean monitor = true; - private ExecutorService executorService; - - private LeaseMonitor() { - this.monitor = true; - this.executorService = Executors.newCachedThreadPool(); - } - - @Override - public void run() { - while (monitor) { - LOG.debug("{}-LeaseMonitor: checking for lease expiry", name); - long sleepTime = Long.MAX_VALUE; - - for (T resource : activeLeases.keySet()) { - try { - Lease lease = get(resource); - long remainingTime = lease.getRemainingTime(); - if (remainingTime <= 0) { - //Lease has timed out - List> leaseCallbacks = lease.getCallbacks(); - release(resource); - executorService.execute( - new LeaseCallbackExecutor(resource, leaseCallbacks)); - } else { - sleepTime = remainingTime > sleepTime ? 
- sleepTime : remainingTime; - } - } catch (LeaseNotFoundException | LeaseExpiredException ex) { - //Ignore the exception, someone might have released the lease - } - } - - try { - if(!Thread.interrupted()) { - Thread.sleep(sleepTime); - } - } catch (InterruptedException ignored) { - // This means a new lease is added to activeLeases. - } - } - } - - /** - * Disables lease monitor, next interrupt call on the thread - * will stop lease monitor. - */ - public void disable() { - monitor = false; - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java deleted file mode 100644 index ced31de439486..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that there LeaseManager service is not running. - */ -public class LeaseManagerNotRunningException extends RuntimeException { - - /** - * Constructs an {@code LeaseManagerNotRunningException} with {@code null} - * as its error detail message. - */ - public LeaseManagerNotRunningException() { - super(); - } - - /** - * Constructs an {@code LeaseManagerNotRunningException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseManagerNotRunningException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java deleted file mode 100644 index c292d33232310..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that the lease that is being accessed does not - * exist. - */ -public class LeaseNotFoundException extends LeaseException { - - /** - * Constructs an {@code LeaseNotFoundException} with {@code null} - * as its error detail message. - */ - public LeaseNotFoundException() { - super(); - } - - /** - * Constructs an {@code LeaseNotFoundException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseNotFoundException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java deleted file mode 100644 index 48ee2e1c6ab09..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * A generic lease management API which can be used if a service - * needs any kind of lease management. - */ - -package org.apache.hadoop.ozone.lease; -/* - This package contains lease management related classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java deleted file mode 100644 index 95dfd6c393cac..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * Lock implementation which also maintains counter. - */ -public final class ActiveLock { - - private ReadWriteLock lock; - private AtomicInteger count; - - /** - * Use ActiveLock#newInstance to create instance. - * - * @param fairness - if true the lock uses a fair ordering policy, else - * non-fair ordering. - */ - private ActiveLock(boolean fairness) { - this.lock = new ReentrantReadWriteLock(fairness); - this.count = new AtomicInteger(0); - } - - /** - * Creates a new instance of ActiveLock. - * - * @return new ActiveLock - */ - public static ActiveLock newInstance(boolean fairness) { - return new ActiveLock(fairness); - } - - /** - * Acquires read lock. - * - *
Acquires the read lock if the write lock is not held by - * another thread and returns immediately. - * - *
If the write lock is held by another thread then - * the current thread becomes disabled for thread scheduling - * purposes and lies dormant until the read lock has been acquired. - */ - void readLock() { - lock.readLock().lock(); - } - - /** - * Attempts to release the read lock. - * - *
If the number of readers is now zero then the lock - * is made available for write lock attempts. - */ - void readUnlock() { - lock.readLock().unlock(); - } - - /** - * Acquires write lock. - * - *
Acquires the write lock if neither the read nor write lock - * are held by another thread - * and returns immediately, setting the write lock hold count to - * one. - * - *
If the current thread already holds the write lock then the - * hold count is incremented by one and the method returns - * immediately. - * - *
If the lock is held by another thread then the current - * thread becomes disabled for thread scheduling purposes and - * lies dormant until the write lock has been acquired. - */ - void writeLock() { - lock.writeLock().lock(); - } - - /** - * Attempts to release the write lock. - * - *
If the current thread is the holder of this lock then - * the hold count is decremented. If the hold count is now - * zero then the lock is released. - */ - void writeUnlock() { - lock.writeLock().unlock(); - } - - /** - * Increment the active count of the lock. - */ - void incrementActiveCount() { - count.incrementAndGet(); - } - - /** - * Decrement the active count of the lock. - */ - void decrementActiveCount() { - count.decrementAndGet(); - } - - /** - * Returns the active count on the lock. - * - * @return Number of active leases on the lock. - */ - int getActiveLockCount() { - return count.get(); - } - - /** - * Resets the active count on the lock. - */ - void resetCounter() { - count.set(0); - } - - @Override - public String toString() { - return lock.toString(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java deleted file mode 100644 index 3c2b5d4a394c2..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java +++ /dev/null @@ -1,241 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import org.apache.commons.pool2.impl.GenericObjectPool; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Consumer; - -/** - * Manages the locks on a given resource. A new lock is created for each - * and every unique resource. Uniqueness of resource depends on the - * {@code equals} implementation of it. - */ -public class LockManager { - - private static final Logger LOG = LoggerFactory.getLogger(LockManager.class); - - private final Map activeLocks = new ConcurrentHashMap<>(); - private final GenericObjectPool lockPool; - - /** - * Creates new LockManager instance with the given Configuration.and uses - * non-fair mode for locks. - * - * @param conf Configuration object - */ - public LockManager(final Configuration conf) { - this(conf, false); - } - - - /** - * Creates new LockManager instance with the given Configuration. - * - * @param conf Configuration object - * @param fair - true to use fair lock ordering, else non-fair lock ordering. - */ - public LockManager(final Configuration conf, boolean fair) { - final int maxPoolSize = conf.getInt( - HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY, - HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY_DEFAULT); - lockPool = - new GenericObjectPool<>(new PooledLockFactory(fair)); - lockPool.setMaxTotal(maxPoolSize); - } - - /** - * Acquires the lock on given resource. - * - *
If the lock is not available then the current thread becomes - * disabled for thread scheduling purposes and lies dormant until the - * lock has been acquired. - * - * @param resource on which the lock has to be acquired - * @deprecated Use {@link LockManager#writeLock} instead - */ - public void lock(final R resource) { - writeLock(resource); - } - - /** - * Releases the lock on given resource. - * - * @param resource for which the lock has to be released - * @deprecated Use {@link LockManager#writeUnlock} instead - */ - public void unlock(final R resource) { - writeUnlock(resource); - } - - /** - * Acquires the read lock on given resource. - * - *
Acquires the read lock on resource if the write lock is not held by - * another thread and returns immediately. - * - *
If the write lock on resource is held by another thread then - * the current thread becomes disabled for thread scheduling - * purposes and lies dormant until the read lock has been acquired. - * - * @param resource on which the read lock has to be acquired - */ - public void readLock(final R resource) { - acquire(resource, ActiveLock::readLock); - } - - /** - * Releases the read lock on given resource. - * - * @param resource for which the read lock has to be released - * @throws IllegalMonitorStateException if the current thread does not - * hold this lock - */ - public void readUnlock(final R resource) throws IllegalMonitorStateException { - release(resource, ActiveLock::readUnlock); - } - - /** - * Acquires the write lock on given resource. - * - *
Acquires the write lock on resource if neither the read nor write lock - * are held by another thread and returns immediately. - * - *
If the current thread already holds the write lock then the - * hold count is incremented by one and the method returns - * immediately. - * - *
If the lock is held by another thread then the current - * thread becomes disabled for thread scheduling purposes and - * lies dormant until the write lock has been acquired. - * - * @param resource on which the lock has to be acquired - */ - public void writeLock(final R resource) { - acquire(resource, ActiveLock::writeLock); - } - - /** - * Releases the write lock on given resource. - * - * @param resource for which the lock has to be released - * @throws IllegalMonitorStateException if the current thread does not - * hold this lock - */ - public void writeUnlock(final R resource) - throws IllegalMonitorStateException { - release(resource, ActiveLock::writeUnlock); - } - - /** - * Acquires the lock on given resource using the provided lock function. - * - * @param resource on which the lock has to be acquired - * @param lockFn function to acquire the lock - */ - private void acquire(final R resource, final Consumer lockFn) { - lockFn.accept(getLockForLocking(resource)); - } - - /** - * Releases the lock on given resource using the provided release function. - * - * @param resource for which the lock has to be released - * @param releaseFn function to release the lock - */ - private void release(final R resource, final Consumer releaseFn) { - final ActiveLock lock = getLockForReleasing(resource); - releaseFn.accept(lock); - decrementActiveLockCount(resource); - } - - /** - * Returns {@link ActiveLock} instance for the given resource, - * on which the lock can be acquired. - * - * @param resource on which the lock has to be acquired - * @return {@link ActiveLock} instance - */ - private ActiveLock getLockForLocking(final R resource) { - /* - * While getting a lock object for locking we should - * atomically increment the active count of the lock. - * - * This is to avoid cases where the selected lock could - * be removed from the activeLocks map and returned to - * the object pool. - */ - return activeLocks.compute(resource, (k, v) -> { - final ActiveLock lock; - try { - if (v == null) { - lock = lockPool.borrowObject(); - } else { - lock = v; - } - lock.incrementActiveCount(); - } catch (Exception ex) { - LOG.error("Unable to obtain lock.", ex); - throw new RuntimeException(ex); - } - return lock; - }); - } - - /** - * Returns {@link ActiveLock} instance for the given resource, - * for which the lock has to be released. - * - * @param resource for which the lock has to be released - * @return {@link ActiveLock} instance - */ - private ActiveLock getLockForReleasing(final R resource) { - if (activeLocks.containsKey(resource)) { - return activeLocks.get(resource); - } - // Someone is releasing a lock which was never acquired. - LOG.error("Trying to release the lock on {}, which was never acquired.", - resource); - throw new IllegalMonitorStateException("Releasing lock on resource " - + resource + " without acquiring lock"); - } - - /** - * Decrements the active lock count and returns the {@link ActiveLock} - * object to pool if the active count is 0. 
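
An illustrative sketch of the per-resource read/write locking pattern this LockManager (backed by pooled ActiveLock instances) exposes; the Configuration is a default one and the resource key is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.lock.LockManager;

    public class LockManagerSketch {
      public static void main(String[] args) {
        LockManager<String> lockManager = new LockManager<>(new Configuration());

        lockManager.writeLock("/volume/bucket");
        try {
          // mutate state guarded by the per-resource write lock
        } finally {
          lockManager.writeUnlock("/volume/bucket");
        }

        lockManager.readLock("/volume/bucket");
        try {
          // read state guarded by the per-resource read lock
        } finally {
          lockManager.readUnlock("/volume/bucket");
        }
      }
    }
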
- * - * @param resource resource to which the ActiveLock is associated - */ - private void decrementActiveLockCount(final R resource) { - activeLocks.computeIfPresent(resource, (k, v) -> { - v.decrementActiveCount(); - if (v.getActiveLockCount() != 0) { - return v; - } - lockPool.returnObject(v); - return null; - }); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java deleted file mode 100644 index 1e3ba05a3a2b2..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import org.apache.commons.pool2.BasePooledObjectFactory; -import org.apache.commons.pool2.PooledObject; -import org.apache.commons.pool2.impl.DefaultPooledObject; - -/** - * Pool factory to create {@code ActiveLock} instances. - */ -public class PooledLockFactory extends BasePooledObjectFactory { - - private boolean fairness; - - PooledLockFactory(boolean fair) { - this.fairness = fair; - } - @Override - public ActiveLock create() throws Exception { - return ActiveLock.newInstance(fairness); - } - - @Override - public PooledObject wrap(ActiveLock activeLock) { - return new DefaultPooledObject<>(activeLock); - } - - @Override - public void activateObject(PooledObject pooledObject) { - pooledObject.getObject().resetCounter(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java deleted file mode 100644 index 5c677ced74528..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; -/* - This package contains the lock related classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index db399db25ab07..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
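[Reviewer note, not part of the removed sources: the two lock classes above were used together, with LockManager handing out pooled ActiveLock instances per resource. A minimal usage sketch follows; the class name LockManager<R> is assumed from context (its declaration sits above this hunk), and the surrounding method and "volumeName" argument are hypothetical.]

    // Sketch only (not in this diff): bracket a critical section with the
    // per-resource write lock shown above.
    void updateVolume(LockManager<String> lockManager, String volumeName) {
      lockManager.writeLock(volumeName);      // blocks until the write lock is held
      try {
        // ... mutate state guarded by the per-volume lock ...
      } finally {
        lockManager.writeUnlock(volumeName);  // always release, even on failure
      }
    }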
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -/** - This package contains class that support ozone implementation on the datanode - side. - - Main parts of ozone on datanode are: - - 1. REST Interface - This code lives under the web directory and listens to the - WebHDFS port. - - 2. Datanode container classes: This support persistence of ozone objects on - datanode. These classes live under container directory. - - 3. Client and Shell: We also support a ozone REST client lib, they are under - web/client and web/ozShell. - - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java deleted file mode 100644 index 96725f269a124..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; - -import com.google.protobuf.ProtocolMessageEnum; - -/** - * Metrics to count all the subtypes of a specific message. 
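[Reviewer note, not part of the removed source: a usage sketch for the ProtocolMessageMetrics class whose implementation follows below. The ContainerProtos.Type enum comes from the container protocol removed later in this patch; the "request" variable and the getCmdType() accessor are assumptions based on standard protoc-generated code, not taken from this diff.]

    // Sketch only: count every container command type handled by a dispatcher.
    ProtocolMessageMetrics metrics = ProtocolMessageMetrics.create(
        "ContainerCommandMetrics",              // hypothetical metrics source name
        "Counters per container command type",  // hypothetical description
        ContainerProtos.Type.values());         // any protoc-generated enum works
    metrics.register();                         // publish via DefaultMetricsSystem
    metrics.increment(request.getCmdType());    // once per handled request
    metrics.unregister();                       // on shutdown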
- */ -public class ProtocolMessageMetrics implements MetricsSource { - - private String name; - - private String description; - - private Map counters = - new ConcurrentHashMap<>(); - - public static ProtocolMessageMetrics create(String name, - String description, ProtocolMessageEnum[] types) { - ProtocolMessageMetrics protocolMessageMetrics = - new ProtocolMessageMetrics(name, description, - types); - return protocolMessageMetrics; - } - - public ProtocolMessageMetrics(String name, String description, - ProtocolMessageEnum[] values) { - this.name = name; - this.description = description; - for (ProtocolMessageEnum value : values) { - counters.put(value, new AtomicLong(0)); - } - } - - public void increment(ProtocolMessageEnum key) { - counters.get(key).incrementAndGet(); - } - - public void register() { - DefaultMetricsSystem.instance() - .register(name, description, this); - } - - public void unregister() { - DefaultMetricsSystem.instance().unregisterSource(name); - } - - @Override - public void getMetrics(MetricsCollector collector, boolean all) { - MetricsRecordBuilder builder = collector.addRecord(name); - counters.forEach((key, value) -> { - builder.addCounter(new MetricName(key.toString(), ""), value.longValue()); - }); - builder.endRecord(); - } - - /** - * Simple metrics info implementation. - */ - public static class MetricName implements MetricsInfo { - private String name; - private String description; - - public MetricName(String name, String description) { - this.name = name; - this.description = description; - } - - @Override - public String name() { - return name; - } - - @Override - public String description() { - return description; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java deleted file mode 100644 index 860386d9fdcc5..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.protocolPB; - -/** - * This package contains classes for the Protocol Buffers binding of Ozone - * protocols. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java deleted file mode 100644 index 4177b96a354c3..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.utils; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.type.CollectionType; - -import java.io.IOException; -import java.util.List; - -/** - * JSON Utility functions used in ozone. - */ -public final class JsonUtils { - - // Reuse ObjectMapper instance for improving performance. - // ObjectMapper is thread safe as long as we always configure instance - // before use. - private static final ObjectMapper MAPPER = new ObjectMapper(); - private static final ObjectReader READER = MAPPER.readerFor(Object.class); - private static final ObjectWriter WRITTER = - MAPPER.writerWithDefaultPrettyPrinter(); - - private JsonUtils() { - // Never constructed - } - - public static String toJsonStringWithDefaultPrettyPrinter(Object obj) - throws IOException { - return WRITTER.writeValueAsString(obj); - } - - public static String toJsonString(Object obj) throws IOException { - return MAPPER.writeValueAsString(obj); - } - - /** - * Deserialize a list of elements from a given string, - * each element in the list is in the given type. - * - * @param str json string. - * @param elementType element type. - * @return List of elements of type elementType - * @throws IOException - */ - public static List toJsonList(String str, Class elementType) - throws IOException { - CollectionType type = MAPPER.getTypeFactory() - .constructCollectionType(List.class, elementType); - return MAPPER.readValue(str, type); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java deleted file mode 100644 index e5812c00d941e..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
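[Reviewer note, not part of the removed source: a short usage sketch for the JsonUtils helpers deleted above; the sample data is hypothetical and the snippet assumes it runs inside a method declared to throw IOException.]

    // Sketch only; assumes "throws IOException" on the enclosing method and
    // imports of java.util.Arrays and java.util.List.
    List<String> volumes = Arrays.asList("vol-a", "vol-b");   // hypothetical data
    String compact = JsonUtils.toJsonString(volumes);
    String pretty  = JsonUtils.toJsonStringWithDefaultPrettyPrinter(volumes);
    List<?> roundTrip = JsonUtils.toJsonList(compact, String.class);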
- */ - -package org.apache.hadoop.ozone.web.utils; diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto deleted file mode 100644 index 1bfe4d1247c5f..0000000000000 --- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto +++ /dev/null @@ -1,469 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and Unstable. - * Please see http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/InterfaceClassification.html - * for what changes are allowed for a *Unstable* .proto interface. - */ - -// This file contains protocol buffers that are used to transfer data -// to and from the datanode. -syntax = "proto2"; -option java_package = "org.apache.hadoop.hdds.protocol.datanode.proto"; -option java_outer_classname = "ContainerProtos"; -option java_generate_equals_and_hash = true; -package hadoop.hdds.datanode; - -/** - * Commands that are used to manipulate the state of containers on a datanode. - * - * These commands allow us to work against the datanode - from - * StorageContainer Manager as well as clients. - * - * 1. CreateContainer - This call is usually made by Storage Container - * manager, when we need to create a new container on a given datanode. - * - * 2. ReadContainer - Allows end user to stat a container. For example - * this allows us to return the metadata of a container. - * - * 3. UpdateContainer - Updates a container metadata. - - * 4. DeleteContainer - This call is made to delete a container. - * - * 5. ListContainer - Returns the list of containers on this - * datanode. This will be used by tests and tools. - * - * 6. PutBlock - Given a valid container, creates a block. - * - * 7. GetBlock - Allows user to read the metadata of a block. - * - * 8. DeleteBlock - Deletes a given block. - * - * 9. ListBlock - Returns a list of blocks that are present inside - * a given container. - * - * 10. ReadChunk - Allows us to read a chunk. - * - * 11. DeleteChunk - Delete an unused chunk. - * - * 12. WriteChunk - Allows us to write a chunk - * - * 13. ListChunk - Given a Container/Block returns the list of Chunks. - * - * 14. CompactChunk - Re-writes a chunk based on Offsets. - * - * 15. PutSmallFile - A single RPC that combines both putBlock and WriteChunk. - * - * 16. GetSmallFile - A single RPC that combines both getBlock and ReadChunk. - * - * 17. CloseContainer - Closes an open container and makes it immutable. - * - * 18. CopyContainer - Copies a container from a remote machine. 
- */ - -enum Type { - CreateContainer = 1; - ReadContainer = 2; - UpdateContainer = 3; - DeleteContainer = 4; - ListContainer = 5; - - PutBlock = 6; - GetBlock = 7; - DeleteBlock = 8; - ListBlock = 9; - - ReadChunk = 10; - DeleteChunk = 11; - WriteChunk = 12; - ListChunk = 13; - CompactChunk = 14; - - /** Combines Block and Chunk Operation into Single RPC. */ - PutSmallFile = 15; - GetSmallFile = 16; - CloseContainer = 17; - GetCommittedBlockLength = 18; -} - - -enum Result { - SUCCESS = 1; - UNSUPPORTED_REQUEST = 2; - MALFORMED_REQUEST = 3; - CONTAINER_INTERNAL_ERROR = 4; - INVALID_CONFIG = 5; - INVALID_FILE_HASH_FOUND = 6; - CONTAINER_EXISTS = 7; - NO_SUCH_ALGORITHM = 8; - CONTAINER_NOT_FOUND = 9; - IO_EXCEPTION = 10; - UNABLE_TO_READ_METADATA_DB = 11; - NO_SUCH_BLOCK = 12; - OVERWRITE_FLAG_REQUIRED = 13; - UNABLE_TO_FIND_DATA_DIR = 14; - INVALID_WRITE_SIZE = 15; - CHECKSUM_MISMATCH = 16; - UNABLE_TO_FIND_CHUNK = 17; - PROTOC_DECODING_ERROR = 18; - INVALID_ARGUMENT = 19; - PUT_SMALL_FILE_ERROR = 20; - GET_SMALL_FILE_ERROR = 21; - CLOSED_CONTAINER_IO = 22; - ERROR_IN_COMPACT_DB = 24; - UNCLOSED_CONTAINER_IO = 25; - DELETE_ON_OPEN_CONTAINER = 26; - CLOSED_CONTAINER_RETRY = 27; - INVALID_CONTAINER_STATE = 28; - DISK_OUT_OF_SPACE = 29; - CONTAINER_ALREADY_EXISTS = 30; - CONTAINER_METADATA_ERROR = 31; - CONTAINER_FILES_CREATE_ERROR = 32; - CONTAINER_CHECKSUM_ERROR = 33; - UNKNOWN_CONTAINER_TYPE = 34; - BLOCK_NOT_COMMITTED = 35; - CONTAINER_UNHEALTHY = 36; - UNKNOWN_BCSID = 37; - BCSID_MISMATCH = 38; - CONTAINER_NOT_OPEN = 39; - CONTAINER_MISSING = 40; - BLOCK_TOKEN_VERIFICATION_FAILED = 41; - ERROR_IN_DB_SYNC = 42; -} - -/** - * Block ID that uniquely identify a block in Datanode. - */ -message DatanodeBlockID { - required int64 containerID = 1; - required int64 localID = 2; - optional uint64 blockCommitSequenceId = 3 [default = 0]; -} - -message KeyValue { - required string key = 1; - optional string value = 2; -} - -message ContainerCommandRequestProto { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. - optional string traceID = 2; - - required int64 containerID = 3; - required string datanodeUuid = 4; - optional string pipelineID = 5; - - // One of the following command is available when the corresponding - // cmdType is set. At the protocol level we allow only - // one command in each packet. - // TODO : Upgrade to Protobuf 2.6 or later. 
- optional CreateContainerRequestProto createContainer = 6; - optional ReadContainerRequestProto readContainer = 7; - optional UpdateContainerRequestProto updateContainer = 8; - optional DeleteContainerRequestProto deleteContainer = 9; - optional ListContainerRequestProto listContainer = 10; - optional CloseContainerRequestProto closeContainer = 11; - - optional PutBlockRequestProto putBlock = 12; - optional GetBlockRequestProto getBlock = 13; - optional DeleteBlockRequestProto deleteBlock = 14; - optional ListBlockRequestProto listBlock = 15; - - optional ReadChunkRequestProto readChunk = 16; - optional WriteChunkRequestProto writeChunk = 17; - optional DeleteChunkRequestProto deleteChunk = 18; - optional ListChunkRequestProto listChunk = 19; - - optional PutSmallFileRequestProto putSmallFile = 20; - optional GetSmallFileRequestProto getSmallFile = 21; - optional GetCommittedBlockLengthRequestProto getCommittedBlockLength = 22; - optional string encodedToken = 23; -} - -message ContainerCommandResponseProto { - required Type cmdType = 1; - optional string traceID = 2; - - required Result result = 3; - optional string message = 4; - - optional CreateContainerResponseProto createContainer = 5; - optional ReadContainerResponseProto readContainer = 6; - optional UpdateContainerResponseProto updateContainer = 7; - optional DeleteContainerResponseProto deleteContainer = 8; - optional ListContainerResponseProto listContainer = 9; - optional CloseContainerResponseProto closeContainer = 10; - - optional PutBlockResponseProto putBlock = 11; - optional GetBlockResponseProto getBlock = 12; - optional DeleteBlockResponseProto deleteBlock = 13; - optional ListBlockResponseProto listBlock = 14; - - optional WriteChunkResponseProto writeChunk = 15; - optional ReadChunkResponseProto readChunk = 16; - optional DeleteChunkResponseProto deleteChunk = 17; - optional ListChunkResponseProto listChunk = 18; - - optional PutSmallFileResponseProto putSmallFile = 19; - optional GetSmallFileResponseProto getSmallFile = 20; - - optional GetCommittedBlockLengthResponseProto getCommittedBlockLength = 21; -} - -message ContainerDataProto { - enum State { - OPEN = 1; - CLOSING = 2; - QUASI_CLOSED =3; - CLOSED = 4; - UNHEALTHY = 5; - INVALID = 6; - } - required int64 containerID = 1; - repeated KeyValue metadata = 2; - optional string containerPath = 4; - optional int64 bytesUsed = 6; - optional int64 size = 7; - optional int64 blockCount = 8; - optional State state = 9 [default = OPEN]; - optional ContainerType containerType = 10 [default = KeyValueContainer]; -} - -message Container2BCSIDMapProto { - // repeated Container2BCSIDMapEntryProto container2BCSID = 1; - map container2BCSID = 1; -} - -enum ContainerType { - KeyValueContainer = 1; -} - - -// Container Messages. 
-message CreateContainerRequestProto { - repeated KeyValue metadata = 2; - optional ContainerType containerType = 3 [default = KeyValueContainer]; -} - -message CreateContainerResponseProto { -} - -message ReadContainerRequestProto { -} - -message ReadContainerResponseProto { - optional ContainerDataProto containerData = 1; -} - -message UpdateContainerRequestProto { - repeated KeyValue metadata = 2; - optional bool forceUpdate = 3 [default = false]; -} - -message UpdateContainerResponseProto { -} - -message DeleteContainerRequestProto { - optional bool forceDelete = 2 [default = false]; -} - -message DeleteContainerResponseProto { -} - -message ListContainerRequestProto { - optional uint32 count = 2; // Max Results to return -} - -message ListContainerResponseProto { - repeated ContainerDataProto containerData = 1; -} - -message CloseContainerRequestProto { -} - -message CloseContainerResponseProto { - optional string hash = 1; - optional int64 containerID = 2; -} - -message BlockData { - required DatanodeBlockID blockID = 1; - optional int64 flags = 2; // for future use. - repeated KeyValue metadata = 3; - repeated ChunkInfo chunks = 4; - optional int64 size = 5; -} - -// Block Messages. -message PutBlockRequestProto { - required BlockData blockData = 1; -} - -message PutBlockResponseProto { - required GetCommittedBlockLengthResponseProto committedBlockLength = 1; -} - -message GetBlockRequestProto { - required DatanodeBlockID blockID = 1; -} - -message GetBlockResponseProto { - required BlockData blockData = 1; -} - - -message DeleteBlockRequestProto { - required DatanodeBlockID blockID = 1; -} - -message GetCommittedBlockLengthRequestProto { - required DatanodeBlockID blockID = 1; -} - -message GetCommittedBlockLengthResponseProto { - required DatanodeBlockID blockID = 1; - required int64 blockLength = 2; -} - -message DeleteBlockResponseProto { -} - -message ListBlockRequestProto { - optional int64 startLocalID = 2; - required uint32 count = 3; - -} - -message ListBlockResponseProto { - repeated BlockData blockData = 1; -} - -// Chunk Operations - -message ChunkInfo { - required string chunkName = 1; - required uint64 offset = 2; - required uint64 len = 3; - repeated KeyValue metadata = 4; - required ChecksumData checksumData =5; -} - -message ChecksumData { - required ChecksumType type = 1; - required uint32 bytesPerChecksum = 2; - repeated bytes checksums = 3; -} - -enum ChecksumType { - NONE = 1; - CRC32 = 2; - CRC32C = 3; - SHA256 = 4; - MD5 = 5; -} - -message WriteChunkRequestProto { - required DatanodeBlockID blockID = 1; - required ChunkInfo chunkData = 2; - optional bytes data = 3; -} - -message WriteChunkResponseProto { -} - -message ReadChunkRequestProto { - required DatanodeBlockID blockID = 1; - required ChunkInfo chunkData = 2; -} - -message ReadChunkResponseProto { - required DatanodeBlockID blockID = 1; - required ChunkInfo chunkData = 2; - required bytes data = 3; -} - -message DeleteChunkRequestProto { - required DatanodeBlockID blockID = 1; - required ChunkInfo chunkData = 2; -} - -message DeleteChunkResponseProto { -} - -message ListChunkRequestProto { - required DatanodeBlockID blockID = 1; - required string prevChunkName = 2; - required uint32 count = 3; -} - -message ListChunkResponseProto { - repeated ChunkInfo chunkData = 1; -} - -/** For small file access combines write chunk and putBlock into a single -RPC */ - -message PutSmallFileRequestProto { - required PutBlockRequestProto block = 1; - required ChunkInfo chunkInfo = 2; - required bytes data = 3; -} - - 
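[Reviewer note, not part of the removed .proto: a sketch of the one-command-per-envelope pattern described in the ContainerCommandRequestProto comment above. The Java class and setter names are assumed from standard protoc code generation for java_outer_classname "ContainerProtos"; they are not verified against this diff, and "datanodeUuid" is a hypothetical variable.]

    // Sketch only: exactly one sub-request is populated, and cmdType says which.
    ContainerProtos.ContainerCommandRequestProto getBlock =
        ContainerProtos.ContainerCommandRequestProto.newBuilder()
            .setCmdType(ContainerProtos.Type.GetBlock)
            .setContainerID(42L)
            .setDatanodeUuid(datanodeUuid)      // assumed to be in scope
            .setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
                .setBlockID(ContainerProtos.DatanodeBlockID.newBuilder()
                    .setContainerID(42L)
                    .setLocalID(7L)))
            .build();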
-message PutSmallFileResponseProto { - required GetCommittedBlockLengthResponseProto committedBlockLength = 1; -} - -message GetSmallFileRequestProto { - required GetBlockRequestProto block = 1; -} - -message GetSmallFileResponseProto { - required ReadChunkResponseProto data = 1; -} - -message CopyContainerRequestProto { - required int64 containerID = 1; - required uint64 readOffset = 2; - optional uint64 len = 3; -} - -message CopyContainerResponseProto { - required int64 containerID = 1; - required uint64 readOffset = 2; - required uint64 len = 3; - required bool eof = 4; - required bytes data = 5; - optional int64 checksum = 6; -} - -service XceiverClientProtocolService { - // A client-to-datanode RPC to send container commands - rpc send(stream ContainerCommandRequestProto) returns - (stream ContainerCommandResponseProto) {}; - -} - -service IntraDatanodeProtocolService { - // An intradatanode service to copy the raw container data between nodes - rpc download (CopyContainerRequestProto) returns (stream CopyContainerResponseProto); -} diff --git a/hadoop-hdds/common/src/main/proto/FSProtos.proto b/hadoop-hdds/common/src/main/proto/FSProtos.proto deleted file mode 100644 index c3b768ab67ed1..0000000000000 --- a/hadoop-hdds/common/src/main/proto/FSProtos.proto +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.fs"; -option java_outer_classname = "FSProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.fs; - -message FsPermissionProto { - required uint32 perm = 1; // UNIX-style mode bits -} - -/* - * FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but - * cross-serialization is not an explicitly supported use case. Unlike HDFS, - * most fields are optional and do not define defaults. 
- */ -message FileStatusProto { - enum FileType { - FT_DIR = 1; - FT_FILE = 2; - FT_SYMLINK = 3; - } - enum Flags { - HAS_ACL = 0x01; // has ACLs - HAS_CRYPT = 0x02; // encrypted - HAS_EC = 0x04; // erasure coded - SNAPSHOT_ENABLED = 0x08; // snapshot enabled - } - required FileType fileType = 1; - required string path = 2; - optional uint64 length = 3; - optional FsPermissionProto permission = 4; - optional string owner = 5; - optional string group = 6; - optional uint64 modification_time = 7; - optional uint64 access_time = 8; - optional string symlink = 9; - optional uint32 block_replication = 10; - optional uint64 block_size = 11; - // locations = 12 - // alias = 13 - // childrenNum = 14 - optional bytes encryption_data = 15; - // storagePolicy = 16 - optional bytes ec_data = 17; - optional uint32 flags = 18 [default = 0]; -} - -/** - * Placeholder type for consistent basic FileSystem operations. - */ -message LocalFileSystemPathHandleProto { - optional uint64 mtime = 1; - optional string path = 2; -} diff --git a/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto b/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto deleted file mode 100644 index 72e0e9f66f7d4..0000000000000 --- a/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdds.protocol.proto"; - -option java_outer_classname = "SCMSecurityProtocolProtos"; - -option java_generic_services = true; - -option java_generate_equals_and_hash = true; - -package hadoop.hdds.security; - -import "hdds.proto"; - -/** -All commands is send as request and all response come back via -Response class. If adding new functions please follow this protocol, since -our tracing and visibility tools depend on this pattern. -*/ -message SCMSecurityRequest { - required Type cmdType = 1; // Type of the command - - optional string traceID = 2; - - optional SCMGetDataNodeCertRequestProto getDataNodeCertRequest = 3; - optional SCMGetOMCertRequestProto getOMCertRequest = 4; - optional SCMGetCertificateRequestProto getCertificateRequest = 5; - optional SCMGetCACertificateRequestProto getCACertificateRequest = 6; - -} - -message SCMSecurityResponse { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. 
- optional string traceID = 2; - - optional bool success = 3 [default = true]; - - optional string message = 4; - - required Status status = 5; - - optional SCMGetCertResponseProto getCertResponseProto = 6; - -} - -enum Type { - GetDataNodeCertificate = 1; - GetOMCertificate = 2; - GetCertificate = 3; - GetCACertificate = 4; -} - -enum Status { - OK = 1; -} -/** -* This message is send by data node to prove its identity and get an SCM -* signed certificate. -*/ -message SCMGetDataNodeCertRequestProto { - required DatanodeDetailsProto datanodeDetails = 1; - required string CSR = 2; -} - -/** -* This message is send by OzoneManager to prove its identity and get an SCM -* signed certificate. -*/ -message SCMGetOMCertRequestProto { - required OzoneManagerDetailsProto omDetails = 1; - required string CSR = 2; -} - -/** -* Proto request to get a certificate with given serial id. -*/ -message SCMGetCertificateRequestProto { - required string certSerialId = 1; -} - -/** -* Proto request to get CA certificate. -*/ -message SCMGetCACertificateRequestProto { -} - -/** - * Returns a certificate signed by SCM. - */ -message SCMGetCertResponseProto { - enum ResponseCode { - success = 1; - authenticationFailed = 2; - invalidCSR = 3; - } - required ResponseCode responseCode = 1; - required string x509Certificate = 2; // Base64 encoded X509 certificate. - optional string x509CACertificate = 3; // Base64 encoded CA X509 certificate. -} - - -service SCMSecurityProtocolService { - rpc submitRequest (SCMSecurityRequest) returns (SCMSecurityResponse); -} diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto deleted file mode 100644 index fc7a5988ce669..0000000000000 --- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto +++ /dev/null @@ -1,212 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdds.protocol.proto"; -option java_outer_classname = "ScmBlockLocationProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.hdds.block; - -import "hdds.proto"; - - -// SCM Block protocol - -enum Type { - AllocateScmBlock = 11; - DeleteScmKeyBlocks = 12; - GetScmInfo = 13; - SortDatanodes = 14; -} - -message SCMBlockLocationRequest { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. 
- optional string traceID = 2; - - optional UserInfo userInfo = 3; - - optional AllocateScmBlockRequestProto allocateScmBlockRequest = 11; - optional DeleteScmKeyBlocksRequestProto deleteScmKeyBlocksRequest = 12; - optional hadoop.hdds.GetScmInfoRequestProto getScmInfoRequest = 13; - optional SortDatanodesRequestProto sortDatanodesRequest = 14; -} - -message SCMBlockLocationResponse { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. - optional string traceID = 2; - - optional bool success = 3 [default=true]; - - optional string message = 4; - - required Status status = 5; - - optional string leaderOMNodeId = 6; - - optional AllocateScmBlockResponseProto allocateScmBlockResponse = 11; - optional DeleteScmKeyBlocksResponseProto deleteScmKeyBlocksResponse = 12; - optional hadoop.hdds.GetScmInfoResponseProto getScmInfoResponse = 13; - optional SortDatanodesResponseProto sortDatanodesResponse = 14; -} - -/** - User information which will be extracted during RPC context and used - during validating Acl. -*/ -message UserInfo { - optional string userName = 1; - optional string remoteAddress = 3; -} - -enum Status { - OK = 1; - FAILED_TO_LOAD_NODEPOOL = 2; - FAILED_TO_FIND_NODE_IN_POOL = 3; - FAILED_TO_FIND_HEALTHY_NODES = 4; - FAILED_TO_FIND_NODES_WITH_SPACE = 5; - FAILED_TO_FIND_SUITABLE_NODE = 6; - INVALID_CAPACITY = 7; - INVALID_BLOCK_SIZE = 8; - SAFE_MODE_EXCEPTION = 9; - FAILED_TO_LOAD_OPEN_CONTAINER = 10; - FAILED_TO_ALLOCATE_CONTAINER = 11; - FAILED_TO_CHANGE_CONTAINER_STATE = 12; - FAILED_TO_CHANGE_PIPELINE_STATE = 13; - CONTAINER_EXISTS = 14; - FAILED_TO_FIND_CONTAINER = 15; - FAILED_TO_FIND_CONTAINER_WITH_SPACE = 16; - BLOCK_EXISTS = 17; - FAILED_TO_FIND_BLOCK = 18; - IO_EXCEPTION = 19; - UNEXPECTED_CONTAINER_STATE = 20; - SCM_NOT_INITIALIZED = 21; - DUPLICATE_DATANODE = 22; - NO_SUCH_DATANODE = 23; - NO_REPLICA_FOUND = 24; - FAILED_TO_FIND_ACTIVE_PIPELINE = 25; - FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY = 26; - FAILED_TO_ALLOCATE_ENOUGH_BLOCKS = 27; - INTERNAL_ERROR = 29; -} - -/** -* Request send to SCM asking allocate block of specified size. -*/ -message AllocateScmBlockRequestProto { - required uint64 size = 1; - required uint32 numBlocks = 2; - required ReplicationType type = 3; - required hadoop.hdds.ReplicationFactor factor = 4; - required string owner = 5; - optional ExcludeListProto excludeList = 7; -} - -/** - * A delete key request sent by OM to SCM, it contains - * multiple number of keys (and their blocks). - */ -message DeleteScmKeyBlocksRequestProto { - repeated KeyBlocks keyBlocks = 1; -} - -/** - * A object key and all its associated blocks. - * We need to encapsulate object key name plus the blocks in this potocol - * because SCM needs to response OM with the keys it has deleted. - * If the response only contains blocks, it will be very expensive for - * OM to figure out what keys have been deleted. - */ -message KeyBlocks { - required string key = 1; - repeated BlockID blocks = 2; -} - -/** - * A delete key response from SCM to OM, it contains multiple child-results. - * Each child-result represents a key deletion result, only if all blocks of - * a key are successfully deleted, this key result is considered as succeed. - */ -message DeleteScmKeyBlocksResponseProto { - repeated DeleteKeyBlocksResultProto results = 1; -} - -/** - * A key deletion result. It contains all the block deletion results. 
- */ -message DeleteKeyBlocksResultProto { - required string objectKey = 1; - repeated DeleteScmBlockResult blockResults = 2; -} - -message DeleteScmBlockResult { - enum Result { - success = 1; - safeMode = 2; - errorNotFound = 3; - unknownFailure = 4; - } - required Result result = 1; - required BlockID blockID = 2; -} - -message AllocateBlockResponse { - optional ContainerBlockID containerBlockID = 1; - optional hadoop.hdds.Pipeline pipeline = 2; -} - -/** - * Reply from SCM indicating that the container. - */ -message AllocateScmBlockResponseProto { - repeated AllocateBlockResponse blocks = 3; -} - -/** - * Datanode sort request sent by OM to SCM, it contains - * multiple number of datanodes. - */ -message SortDatanodesRequestProto{ - required string client = 1; - repeated string nodeNetworkName = 2; -} - -message SortDatanodesResponseProto{ - repeated DatanodeDetailsProto node = 1; -} - -/** - * Protocol used from OzoneManager to StorageContainerManager. - * See request and response messages for details of the RPC calls. - */ -service ScmBlockLocationProtocolService { - - rpc send(SCMBlockLocationRequest) - returns (SCMBlockLocationResponse); -} diff --git a/hadoop-hdds/common/src/main/proto/Security.proto b/hadoop-hdds/common/src/main/proto/Security.proto deleted file mode 100644 index a3ce7392d0b0f..0000000000000 --- a/hadoop-hdds/common/src/main/proto/Security.proto +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. 
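[Reviewer note, not part of the removed .proto files: the SCMSecurityProtocol and ScmBlockLocationProtocol wrappers above funnel every call through a single RPC and route on cmdType, as their comments describe. A server-side dispatch sketch for the block-location wrapper follows; simple class names are used for brevity (the generated types are nested in ScmBlockLocationProtocolProtos), the accessors are assumed from standard protoc output, and handleAllocate/handleDelete are hypothetical helpers.]

    // Sketch only: one submitRequest/send RPC, routed by cmdType on the server.
    SCMBlockLocationResponse dispatch(SCMBlockLocationRequest req) {
      SCMBlockLocationResponse.Builder reply = SCMBlockLocationResponse.newBuilder()
          .setCmdType(req.getCmdType())
          .setTraceID(req.getTraceID())
          .setStatus(Status.OK);
      switch (req.getCmdType()) {
      case AllocateScmBlock:
        reply.setAllocateScmBlockResponse(handleAllocate(req.getAllocateScmBlockRequest()));
        break;
      case DeleteScmKeyBlocks:
        reply.setDeleteScmKeyBlocksResponse(handleDelete(req.getDeleteScmKeyBlocksRequest()));
        break;
      default:
        reply.setSuccess(false)
            .setStatus(Status.INTERNAL_ERROR)
            .setMessage("Unsupported cmdType: " + req.getCmdType());
      }
      return reply.build();
    }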
- */ - -option java_package = "org.apache.hadoop.security.proto"; -option java_outer_classname = "SecurityProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * Security token identifier - */ -message TokenProto { - required bytes identifier = 1; - required bytes password = 2; - required string kind = 3; - required string service = 4; -} - -message CredentialsKVProto { - required string alias = 1; - optional hadoop.common.TokenProto token = 2; - optional bytes secret = 3; -} - -message CredentialsProto { - repeated hadoop.common.CredentialsKVProto tokens = 1; - repeated hadoop.common.CredentialsKVProto secrets = 2; -} - -message GetDelegationTokenRequestProto { - required string renewer = 1; -} - -message GetDelegationTokenResponseProto { - optional hadoop.common.TokenProto token = 1; -} - -message RenewDelegationTokenRequestProto { - required hadoop.common.TokenProto token = 1; -} - -message RenewDelegationTokenResponseProto { - required uint64 newExpiryTime = 1; -} - -message CancelDelegationTokenRequestProto { - required hadoop.common.TokenProto token = 1; -} - -message CancelDelegationTokenResponseProto { // void response -} diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto deleted file mode 100644 index 8ea72b6cd1780..0000000000000 --- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto +++ /dev/null @@ -1,330 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdds.protocol.proto"; -option java_outer_classname = "StorageContainerLocationProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.hdds.container; - -import "hdds.proto"; - -/** - All functions are dispatched as Request/Response under Ozone. - if you add newe functions, please add them using same pattern. -*/ -message ScmContainerLocationRequest { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. 
- optional string traceID = 2; - - optional ContainerRequestProto containerRequest = 6; - optional GetContainerRequestProto getContainerRequest = 7; - optional GetContainerWithPipelineRequestProto getContainerWithPipelineRequest = 8; - optional SCMListContainerRequestProto scmListContainerRequest = 9; - optional SCMDeleteContainerRequestProto scmDeleteContainerRequest = 10; - optional NodeQueryRequestProto nodeQueryRequest = 11; - optional ObjectStageChangeRequestProto objectStageChangeRequest = 12; - optional PipelineRequestProto pipelineRequest = 13; - optional ListPipelineRequestProto listPipelineRequest = 14; - optional ActivatePipelineRequestProto activatePipelineRequest = 15; - optional DeactivatePipelineRequestProto deactivatePipelineRequest = 16; - optional ClosePipelineRequestProto closePipelineRequest = 17; - optional GetScmInfoRequestProto getScmInfoRequest = 18; - optional InSafeModeRequestProto inSafeModeRequest = 19; - optional ForceExitSafeModeRequestProto forceExitSafeModeRequest = 20; - optional StartReplicationManagerRequestProto startReplicationManagerRequest = 21; - optional StopReplicationManagerRequestProto stopReplicationManagerRequest = 22; - optional ReplicationManagerStatusRequestProto seplicationManagerStatusRequest = 23; - -} - -message ScmContainerLocationResponse { - required Type cmdType = 1; // Type of the command - - optional string traceID = 2; - - optional bool success = 3 [default = true]; - - optional string message = 4; - - required Status status = 5; - - optional ContainerResponseProto containerResponse = 6; - optional GetContainerResponseProto getContainerResponse = 7; - optional GetContainerWithPipelineResponseProto getContainerWithPipelineResponse = 8; - optional SCMListContainerResponseProto scmListContainerResponse = 9; - optional SCMDeleteContainerResponseProto scmDeleteContainerResponse = 10; - optional NodeQueryResponseProto nodeQueryResponse = 11; - optional ObjectStageChangeResponseProto objectStageChangeResponse = 12; - optional PipelineResponseProto pipelineResponse = 13; - optional ListPipelineResponseProto listPipelineResponse = 14; - optional ActivatePipelineResponseProto activatePipelineResponse = 15; - optional DeactivatePipelineResponseProto deactivatePipelineResponse = 16; - optional ClosePipelineResponseProto closePipelineResponse = 17; - optional GetScmInfoResponseProto getScmInfoResponse = 18; - optional InSafeModeResponseProto inSafeModeResponse = 19; - optional ForceExitSafeModeResponseProto forceExitSafeModeResponse = 20; - optional StartReplicationManagerResponseProto startReplicationManagerResponse = 21; - optional StopReplicationManagerResponseProto stopReplicationManagerResponse = 22; - optional ReplicationManagerStatusResponseProto replicationManagerStatusResponse = 23; - enum Status { - OK = 1; - CONTAINER_ALREADY_EXISTS = 2; - CONTAINER_IS_MISSING = 3; - } -} - -enum Type { - - AllocateContainer = 1; - GetContainer = 2; - GetContainerWithPipeline = 3; - ListContainer = 4; - DeleteContainer = 5; - QueryNode = 6; - NotifyObjectStageChange = 7; - AllocatePipeline = 8; - ListPipelines = 9; - ActivatePipeline = 10; - DeactivatePipeline = 11; - ClosePipeline = 12; - GetScmInfo = 13; - InSafeMode = 14; - ForceExitSafeMode = 15; - StartReplicationManager = 16; - StopReplicationManager = 17; - GetReplicationManagerStatus = 18; -} - -/** -* Request send to SCM asking where the container should be created. -*/ -message ContainerRequestProto { - // Ozone only support replication of either 1 or 3. 
- required ReplicationFactor replicationFactor = 2; - required ReplicationType replicationType = 3; - required string owner = 4; - optional string traceID = 5; -} - -/** - * Reply from SCM indicating that the container. - */ -message ContainerResponseProto { - enum Error { - success = 1; - errorContainerAlreadyExists = 2; - errorContainerMissing = 3; - } - required Error errorCode = 1; - required ContainerWithPipeline containerWithPipeline = 2; - optional string errorMessage = 3; -} - -message GetContainerRequestProto { - required int64 containerID = 1; - optional string traceID = 2; - -} - -message GetContainerResponseProto { - required ContainerInfoProto containerInfo = 1; -} - -message GetContainerWithPipelineRequestProto { - required int64 containerID = 1; - optional string traceID = 2; - -} - -message GetContainerWithPipelineResponseProto { - required ContainerWithPipeline containerWithPipeline = 1; -} - -message SCMListContainerRequestProto { - required uint32 count = 1; - optional uint64 startContainerID = 2; - optional string traceID = 3; -} - -message SCMListContainerResponseProto { - repeated ContainerInfoProto containers = 1; -} - -message SCMDeleteContainerRequestProto { - required int64 containerID = 1; - optional string traceID = 2; - -} - -message SCMDeleteContainerResponseProto { - // Empty response -} - -message ObjectStageChangeRequestProto { - enum Type { - container = 1; - pipeline = 2; - } - // delete/copy operation may be added later - enum Op { - create = 1; - close = 2; - } - enum Stage { - begin = 1; - complete = 2; - } - required int64 id = 1; - required Type type = 2; - required Op op= 3; - required Stage stage = 4; - optional string traceID = 5; -} - -message ObjectStageChangeResponseProto { - // Empty response -} - -/* - NodeQueryRequest sends a request to SCM asking to send a list of nodes that - match the NodeState that we are requesting. -*/ -message NodeQueryRequestProto { - required NodeState state = 1; - required QueryScope scope = 2; - optional string poolName = 3; // if scope is pool, then pool name is needed. - optional string traceID = 4; -} - -message NodeQueryResponseProto { - repeated Node datanodes = 1; -} - -/** - Request to create a replication pipeline. - */ -message PipelineRequestProto { - required ReplicationType replicationType = 1; - required ReplicationFactor replicationFactor = 2; - - // if datanodes are specified then pipelines are created using those - // datanodes. 
- optional NodePool nodePool = 3; - optional string pipelineID = 4; - optional string traceID = 5; -} - -message PipelineResponseProto { - enum Error { - success = 1; - errorPipelineAlreadyExists = 2; - } - required Error errorCode = 1; - optional Pipeline pipeline = 2; - optional string errorMessage = 3; -} - -message ListPipelineRequestProto { - optional string traceID = 1; -} - -message ListPipelineResponseProto { - repeated Pipeline pipelines = 1; -} - -message ActivatePipelineRequestProto { - required PipelineID pipelineID = 1; - optional string traceID = 2; -} - -message ActivatePipelineResponseProto { -} - -message DeactivatePipelineRequestProto { - required PipelineID pipelineID = 1; - optional string traceID = 2; -} - -message DeactivatePipelineResponseProto { -} - -message ClosePipelineRequestProto { - required PipelineID pipelineID = 1; - optional string traceID = 2; - -} - -message ClosePipelineResponseProto { -} - -message InSafeModeRequestProto { - optional string traceID = 1; -} - -message InSafeModeResponseProto { - required bool inSafeMode = 1; -} - -message ForceExitSafeModeRequestProto { - optional string traceID = 1; -} - -message ForceExitSafeModeResponseProto { - required bool exitedSafeMode = 1; -} - -message StartReplicationManagerRequestProto { - optional string traceID = 1; -} - -message StartReplicationManagerResponseProto { -} - -message StopReplicationManagerRequestProto { - optional string traceID = 1; -} - -message StopReplicationManagerResponseProto { -} - -message ReplicationManagerStatusRequestProto { - optional string traceID = 1; -} - -message ReplicationManagerStatusResponseProto { - required bool isRunning = 1; -} - -/** - * Protocol used from an HDFS node to StorageContainerManager. See the request - * and response messages for details of the RPC calls. - */ -service StorageContainerLocationProtocolService { - rpc submitRequest (ScmContainerLocationRequest) returns (ScmContainerLocationResponse); - -} diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto deleted file mode 100644 index d2bb355ff8a4a..0000000000000 --- a/hadoop-hdds/common/src/main/proto/hdds.proto +++ /dev/null @@ -1,249 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdds.protocol.proto"; -option java_outer_classname = "HddsProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.hdds; - -message DatanodeDetailsProto { - required string uuid = 1; // UUID assigned to the Datanode. 
- required string ipAddress = 2; // IP address - required string hostName = 3; // hostname - repeated Port ports = 4; - optional string certSerialId = 5; // Certificate serial id. - // network name, can be Ip address or host name, depends - optional string networkName = 6; - optional string networkLocation = 7; // Network topology location -} - -/** - Proto message encapsulating information required to uniquely identify a - OzoneManager. -*/ -message OzoneManagerDetailsProto { - required string uuid = 1; // UUID assigned to the OzoneManager. - required string ipAddress = 2; // IP address of OM. - required string hostName = 3; // Hostname of OM. - repeated Port ports = 4; -} - -message Port { - required string name = 1; - required uint32 value = 2; -} - -message PipelineID { - required string id = 1; -} - -enum PipelineState { - PIPELINE_ALLOCATED = 1; - PIPELINE_OPEN = 2; - PIPELINE_DORMANT = 3; - PIPELINE_CLOSED = 4; -} - -message Pipeline { - required string leaderID = 1; - repeated DatanodeDetailsProto members = 2; - // TODO: remove the state and leaderID from this class - optional PipelineState state = 3 [default = PIPELINE_ALLOCATED]; - optional ReplicationType type = 4 [default = STAND_ALONE]; - optional ReplicationFactor factor = 5 [default = ONE]; - required PipelineID id = 6; - repeated uint32 memberOrders = 7; -} - -message KeyValue { - required string key = 1; - optional string value = 2; -} - -/** - * Type of the node. - */ -enum NodeType { - OM = 1; // Ozone Manager - SCM = 2; // Storage Container Manager - DATANODE = 3; // DataNode -} - -// Should we rename NodeState to DatanodeState? -/** - * Enum that represents the Node State. This is used in calls to getNodeList - * and getNodeCount. - */ -enum NodeState { - HEALTHY = 1; - STALE = 2; - DEAD = 3; - DECOMMISSIONING = 4; - DECOMMISSIONED = 5; -} - -enum QueryScope { - CLUSTER = 1; - POOL = 2; -} - -message Node { - required DatanodeDetailsProto nodeID = 1; - repeated NodeState nodeStates = 2; -} - -message NodePool { - repeated Node nodes = 1; -} - -/** - * LifeCycleState for SCM object creation state machine: - * ->Allocated: allocated on SCM but clean has not started creating it yet. - * ->Creating: allocated and assigned to client to create but not ack-ed yet. - * ->Open: allocated on SCM and created on datanodes and ack-ed by a client. - * ->Close: container closed due to space all used or error? - * ->Timeout -> container failed to create on datanodes or ack-ed by client. - * ->Deleting(TBD) -> container will be deleted after timeout - * 1. ALLOCATE-ed containers on SCM can't serve key/block related operation - * until ACK-ed explicitly which changes the state to OPEN. - * 2. Only OPEN/CLOSED containers can serve key/block related operation. - * 3. ALLOCATE-ed containers that are not ACK-ed timely will be TIMEOUT and - * CLEANUP asynchronously. - */ - -enum LifeCycleState { - OPEN = 1; - CLOSING = 2; - QUASI_CLOSED = 3; - CLOSED = 4; - DELETING = 5; - DELETED = 6; // object is deleted. -} - -enum LifeCycleEvent { - FINALIZE = 1; - QUASI_CLOSE = 2; - CLOSE = 3; // !!Event after this has not been used yet. 
- FORCE_CLOSE = 4; - DELETE = 5; - CLEANUP = 6; -} - -message ContainerInfoProto { - required int64 containerID = 1; - required LifeCycleState state = 2; - optional PipelineID pipelineID = 3; - required uint64 usedBytes = 4; - required uint64 numberOfKeys = 5; - optional int64 stateEnterTime = 6; - required string owner = 7; - optional int64 deleteTransactionId = 8; - optional int64 sequenceId = 9; - required ReplicationFactor replicationFactor = 10; - required ReplicationType replicationType = 11; -} - -message ContainerWithPipeline { - required ContainerInfoProto containerInfo = 1; - required Pipeline pipeline = 2; -} - -message GetScmInfoRequestProto { - optional string traceID = 1; -} - -message GetScmInfoResponseProto { - required string clusterId = 1; - required string scmId = 2; -} - - -enum ReplicationType { - RATIS = 1; - STAND_ALONE = 2; - CHAINED = 3; -} - -enum ReplicationFactor { - ONE = 1; - THREE = 3; -} - -enum ScmOps { - allocateBlock = 1; - keyBlocksInfoList = 2; - getScmInfo = 3; - deleteBlock = 4; - createReplicationPipeline = 5; - allocateContainer = 6; - getContainer = 7; - getContainerWithPipeline = 8; - listContainer = 9; - deleteContainer = 10; - queryNode = 11; -} - -message ExcludeListProto { - repeated string datanodes = 1; - repeated int64 containerIds = 2; - repeated PipelineID pipelineIds = 3; -} - -/** - * Block ID that uniquely identify a block by SCM. - */ -message ContainerBlockID { - required int64 containerID = 1; - required int64 localID = 2; -} - - -/** - * Information for the Hdds block token. - * When adding further fields, make sure they are optional as they would - * otherwise not be backwards compatible. - */ -message BlockTokenSecretProto { - /** - * File access permissions mode. - */ - enum AccessModeProto { - READ = 1; - WRITE = 2; - COPY = 3; - DELETE = 4; - } - required string ownerId = 1; - required string blockId = 2; - required uint64 expiryDate = 3; - required string omCertSerialId = 4; - repeated AccessModeProto modes = 5; - required uint64 maxLength = 6; -} - -message BlockID { - required ContainerBlockID containerBlockID = 1; - optional uint64 blockCommitSequenceId = 2 [default = 0]; -} diff --git a/hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor b/hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor deleted file mode 100644 index f29efdab384d1..0000000000000 --- a/hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -org.apache.hadoop.hdds.conf.ConfigFileGenerator diff --git a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties deleted file mode 100644 index 2cbd817ebbf91..0000000000000 --- a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties +++ /dev/null @@ -1,26 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -version=${declared.hdds.version} -revision=${version-info.scm.commit} -branch=${version-info.scm.branch} -user=${user.name} -date=${version-info.build.time} -url=${version-info.scm.uri} -srcChecksum=${version-info.source.md5} -protocVersion=${protobuf.version} diff --git a/hadoop-hdds/common/src/main/resources/network-topology-default.xml b/hadoop-hdds/common/src/main/resources/network-topology-default.xml deleted file mode 100644 index f86597cdeea18..0000000000000 --- a/hadoop-hdds/common/src/main/resources/network-topology-default.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - 1 - - - - 1 - Root - - - - - rack - - 1 - - InnerNode - - /default-rack - - - - 0 - Leaf - - - - /datacenter/rack/node - - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/resources/network-topology-default.yaml b/hadoop-hdds/common/src/main/resources/network-topology-default.yaml deleted file mode 100644 index 561869fb43b54..0000000000000 --- a/hadoop-hdds/common/src/main/resources/network-topology-default.yaml +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- -# Cost: The cost of crossing this layer. -# The value should be positive integer or 0. This field is optional. -# When it's not defined, it's value is default "1". -cost: 1 - -# The prefix of this layer. -# If the prefix is "dc", then every name in this layer should start with "dc", -# such as "dc1", "dc2". -# Note that unlike XML schema, the prefix must be specified explicitly if the type is InnerNode. -prefix: / - -# Layer type, optional field, default value InnerNode. 
-# Current value range : {ROOT, INNER_NODE, LEAF_NODE} -type: ROOT - -# Layer name -defaultName: root - -# Sub layer -# The sub layer property defines as a list which can reflect a node tree, though -# in schema template it always has only one child. -sublayer: - - - cost: 1 - prefix: dc - defaultName: datacenter - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: rack - defaultName: rack - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: ng - defaultName: nodegroup - type: INNER_NODE - sublayer: - - - defaultName: node - type: LEAF_NODE - prefix: node -... \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml b/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml deleted file mode 100644 index b43ebd5d15373..0000000000000 --- a/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml +++ /dev/null @@ -1,74 +0,0 @@ - - - - - - 1 - - - - 1 - Root - - - - - rack - - 1 - - InnerNode - - /default-rack - - - ng - 1 - InnerNode - /default-nodegroup - - - - 0 - Leaf - - - - /datacenter/rack/nodegroup/node - - false - - diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml deleted file mode 100644 index b0a59fa209ccb..0000000000000 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ /dev/null @@ -1,2504 +0,0 @@ - - - - - - - - - - - - - - - - ozone.container.cache.size - 1024 - PERFORMANCE, CONTAINER, STORAGE - The open container is cached on the data node side. We maintain - an LRU - cache for caching the recently used containers. This setting controls the - size of that cache. - - - - dfs.container.ipc - 9859 - OZONE, CONTAINER, MANAGEMENT - The ipc port number of container. - - - dfs.container.ipc.random.port - false - OZONE, DEBUG, CONTAINER - Allocates a random free port for ozone container. This is used - only while - running unit tests. - - - - dfs.container.chunk.write.sync - false - OZONE, CONTAINER, MANAGEMENT - Determines whether the chunk writes in the container happen as - sync I/0 or buffered I/O operation. - - - - dfs.container.ratis.statemachinedata.sync.timeout - 10s - OZONE, DEBUG, CONTAINER, RATIS - Timeout for StateMachine data writes by Ratis. - - - - dfs.container.ratis.statemachinedata.sync.retries - -1 - OZONE, DEBUG, CONTAINER, RATIS - Number of times the WriteStateMachineData op will be tried - before failing, if this value is -1, then this retries indefinitely. - - - - dfs.container.ratis.log.queue.num-elements - 1024 - OZONE, DEBUG, CONTAINER, RATIS - Limit for the number of operations in Ratis Log Worker. - - - - dfs.container.ratis.log.queue.byte-limit - 4GB - OZONE, DEBUG, CONTAINER, RATIS - Byte limit for Ratis Log Worker queue. - - - - dfs.container.ratis.log.appender.queue.num-elements - 1 - OZONE, DEBUG, CONTAINER, RATIS - Limit for number of append entries in ratis leader's - log appender queue. - - - - dfs.container.ratis.log.appender.queue.byte-limit - 32MB - OZONE, DEBUG, CONTAINER, RATIS - Byte limit for ratis leader's log appender queue. - - - - dfs.container.ratis.log.purge.gap - 1000000 - OZONE, DEBUG, CONTAINER, RATIS - Purge gap between the last purged commit index - and the current index, when the leader decides to purge its log. - - - - dfs.container.ratis.datanode.storage.dir - - OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS - This directory is used for storing Ratis metadata like logs. If - this is - not set then default metadata dirs is used. 
A warning will be logged if - this not set. Ideally, this should be mapped to a fast disk like an SSD. - - - - hdds.datanode.dir - - OZONE, CONTAINER, STORAGE, MANAGEMENT - Determines where on the local filesystem HDDS data will be - stored. Defaults to dfs.datanode.data.dir if not specified. - The directories should be tagged with corresponding storage types - ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for storage policies. The default - storage type will be DISK if the directory does not have a storage type - tagged explicitly. - - - - hdds.datanode.volume.choosing.policy - - OZONE, CONTAINER, STORAGE, MANAGEMENT - - The class name of the policy for choosing volumes in the list of - directories. Defaults to - org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy. - This volume choosing policy selects volumes in a round-robin order. - - - - dfs.container.ratis.enabled - false - OZONE, MANAGEMENT, PIPELINE, RATIS - Ozone supports different kinds of replication pipelines. Ratis - is one of - the replication pipeline supported by ozone. - - - - dfs.container.ratis.ipc - 9858 - OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT - The ipc port number of container. - - - dfs.container.ratis.ipc.random.port - false - OZONE,DEBUG - Allocates a random free port for ozone ratis port for the - container. This - is used only while running unit tests. - - - - dfs.container.ratis.rpc.type - GRPC - OZONE, RATIS, MANAGEMENT - Ratis supports different kinds of transports like netty, GRPC, - Hadoop RPC - etc. This picks one of those for this cluster. - - - - dfs.ratis.snapshot.threshold - 10000 - OZONE, RATIS - Number of transactions after which a ratis snapshot should be - taken. - - - - dfs.container.ratis.statemachine.max.pending.apply-transactions - 10000 - OZONE, RATIS - Maximum number of pending apply transactions in a data - pipeline. The default value is kept same as default snapshot threshold - dfs.ratis.snapshot.threshold. - - - - dfs.container.ratis.num.write.chunk.threads - 60 - OZONE, RATIS, PERFORMANCE - Maximum number of threads in the thread pool that Ratis - will use for writing chunks (60 by default). - - - - dfs.container.ratis.leader.num.pending.requests - 4096 - OZONE, RATIS, PERFORMANCE - Maximum number of pending requests after which the leader - starts rejecting requests from client. - - - - dfs.container.ratis.replication.level - MAJORITY - OZONE, RATIS - Replication level to be used by datanode for submitting a - container command to ratis. Available replication levels are ALL and - MAJORTIY, MAJORITY is used as the default replication level. - - - - dfs.container.ratis.num.container.op.executors - 10 - OZONE, RATIS, PERFORMANCE - Number of executors that will be used by Ratis to execute - container ops.(10 by default). - - - - dfs.container.ratis.segment.size - 1MB - OZONE, RATIS, PERFORMANCE - The size of the raft segment used by Apache Ratis on datanodes. - (1 MB by default) - - - - dfs.container.ratis.segment.preallocated.size - 16KB - OZONE, RATIS, PERFORMANCE - The size of the buffer which is preallocated for raft segment - used by Apache Ratis on datanodes.(16 KB by default) - - - - dfs.container.ratis.statemachine.cache.expiry.interval - 10s - OZONE, RATIS, PERFORMANCE - The interval till which the stateMachine data in ratis - will be cached inside the ContainerStateMachine. 
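A minimal sketch of how the datanode/Ratis keys above are read through the Hadoop Configuration API, assuming ozone-default.xml (or an overriding ozone-site.xml) is available on the classpath; the key names and defaults are the ones listed above.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class RatisConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.addResource("ozone-default.xml");  // assumption: the file is on the classpath

        // Duration-typed values such as "10s" are read with getTimeDuration.
        long syncTimeoutMs = conf.getTimeDuration(
            "dfs.container.ratis.statemachinedata.sync.timeout",
            10_000, TimeUnit.MILLISECONDS);

        // Plain integer limits are read with getInt.
        int logQueueElements = conf.getInt(
            "dfs.container.ratis.log.queue.num-elements", 1024);

        System.out.println(syncTimeoutMs + " ms, " + logQueueElements + " elements");
      }
    }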
- - - - dfs.ratis.client.request.timeout.duration - 3s - OZONE, RATIS, MANAGEMENT - The timeout duration for ratis client request.It should be - set greater than leader election timeout in Ratis. - - - - dfs.ratis.client.request.max.retries - 180 - OZONE, RATIS, MANAGEMENT - Number of retries for ratis client request. - - - dfs.ratis.client.request.retry.interval - 1000ms - OZONE, RATIS, MANAGEMENT - Interval between successive retries for a ratis client request. - - - - dfs.ratis.server.retry-cache.timeout.duration - 600000ms - OZONE, RATIS, MANAGEMENT - Retry Cache entry timeout for ratis server. - - - dfs.ratis.server.request.timeout.duration - 3s - OZONE, RATIS, MANAGEMENT - The timeout duration for ratis server request. - - - dfs.ratis.leader.election.minimum.timeout.duration - 5s - OZONE, RATIS, MANAGEMENT - The minimum timeout duration for ratis leader election. - Default is 5s. - - - - dfs.ratis.server.failure.duration - 120s - OZONE, RATIS, MANAGEMENT - The timeout duration for ratis server failure detection, - once the threshold has reached, the ratis state machine will be informed - about the failure in the ratis ring - - - - hdds.node.report.interval - 60000ms - OZONE, CONTAINER, MANAGEMENT - Time interval of the datanode to send node report. Each - datanode periodically send node report to SCM. Unit could be - defined with postfix (ns,ms,s,m,h,d) - - - hdds.container.report.interval - 60000ms - OZONE, CONTAINER, MANAGEMENT - Time interval of the datanode to send container report. Each - datanode periodically send container report to SCM. Unit could be - defined with postfix (ns,ms,s,m,h,d) - - - hdds.command.status.report.interval - 60000ms - OZONE, CONTAINER, MANAGEMENT - Time interval of the datanode to send status of command - execution. Each datanode periodically the execution status of commands - received from SCM to SCM. Unit could be defined with postfix - (ns,ms,s,m,h,d) - - - hdds.pipeline.report.interval - 60000ms - OZONE, PIPELINE, MANAGEMENT - Time interval of the datanode to send pipeline report. Each - datanode periodically send pipeline report to SCM. Unit could be - defined with postfix (ns,ms,s,m,h,d) - - - - - hdds.prometheus.endpoint.enabled - true - OZONE, MANAGEMENT - Enable prometheus compatible metric page on the HTTP - servers. - - - - - hdds.profiler.endpoint.enabled - false - OZONE, MANAGEMENT - Enable /prof java profiler servlet page on HTTP server. - - - - - - ozone.administrators - - OZONE, SECURITY - Ozone administrator users delimited by the comma. - If not set, only the user who launches an ozone service will be the admin - user. This property must be set if ozone services are started by different - users. Otherwise, the RPC layer will reject calls from other servers which - are started by users not in the list. - - - - ozone.block.deleting.container.limit.per.interval - 10 - OZONE, PERFORMANCE, SCM - A maximum number of containers to be scanned by block deleting - service per - time interval. The block deleting service spawns a thread to handle block - deletions in a container. This property is used to throttle the number of - threads spawned for block deletions. - - - - ozone.block.deleting.limit.per.task - 1000 - OZONE, PERFORMANCE, SCM - A maximum number of blocks to be deleted by block deleting - service per - time interval. This property is used to throttle the actual number of - block deletions on a data node per container. 
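Taken together, these block-deletion throttles bound how quickly a datanode purges deleted blocks: with the listed defaults of 10 containers per interval and 1000 blocks per container per task, combined with the 1m ozone.block.deleting.service.interval defined just below, each datanode deletes at most 10 x 1000 = 10,000 blocks per minute, and raising any one of the three settings raises that ceiling proportionally.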
- - - - ozone.block.deleting.service.interval - 1m - OZONE, PERFORMANCE, SCM - Time interval of the block deleting service. - The block deleting service runs on each datanode periodically and - deletes blocks queued for deletion. Unit could be defined with - postfix (ns,ms,s,m,h,d) - - - - ozone.block.deleting.service.timeout - 300000ms - OZONE, PERFORMANCE, SCM - A timeout value of block deletion service. If this is set - greater than 0, - the service will stop waiting for the block deleting completion after this - time. If timeout happens to a large proportion of block deletion, this - needs to be increased with ozone.block.deleting.limit.per.task. This - setting supports multiple time unit suffixes as described in - dfs.heartbeat.interval. If no suffix is specified, then milliseconds is - assumed. - - - - ozone.UnsafeByteOperations.enabled - true - OZONE, PERFORMANCE, CLIENT - It specifies whether to use unsafe or safe buffer to byteString - copy. - - - - ozone.client.connection.timeout - 5000ms - OZONE, PERFORMANCE, CLIENT - Connection timeout for Ozone client in milliseconds. - - - - ozone.client.stream.buffer.flush.size - 64MB - OZONE, CLIENT - Size which determines at what buffer position , a partial - flush will be initiated during write. It should be ideally a multiple - of chunkSize. - - - - ozone.client.stream.buffer.max.size - 128MB - OZONE, CLIENT - Size which determines at what buffer position, - write call be blocked till acknowledgement of the first partial flush - happens by all servers. - - - - ozone.client.watch.request.timeout - 30s - OZONE, CLIENT - Timeout for the watch API in Ratis client to acknowledge - a particular request getting replayed to all servers. - - - - ozone.client.max.retries - 100 - OZONE, CLIENT - Maximum number of retries by Ozone Client on encountering - exception while writing a key. - - - - ozone.client.retry.interval - 0ms - OZONE, CLIENT - Indicates the time duration a client will wait before - retrying a write key request on encountering an exception. By default - there is no wait. - - - - ozone.client.socket.timeout - 5000ms - OZONE, CLIENT - Socket timeout for Ozone client. Unit could be defined with - postfix (ns,ms,s,m,h,d) - - - ozone.enabled - false - OZONE, REQUIRED - - Status of the Ozone Object Storage service is enabled. - Set to true to enable Ozone. - Set to false to disable Ozone. - Unless this value is set to true, Ozone services will not be started in - the cluster. - - Please note: By default ozone is disabled on a hadoop cluster. - - - - ozone.key.deleting.limit.per.task - 1000 - OM, PERFORMANCE - - A maximum number of keys to be scanned by key deleting service - per time interval in OM. Those keys are sent to delete metadata and - generate transactions in SCM for next async deletion between SCM - and DataNode. - - - - ozone.om.service.ids - - OM, HA - - Comma-separated list of OM service Ids. - - If not set, the default value of "om-service-value" is assigned as the - OM service ID. - - - - ozone.om.nodes.EXAMPLEOMSERVICEID - - OM, HA - - Comma-separated list of OM node Ids for a given OM service ID (eg. - EXAMPLEOMSERVICEID). The OM service ID should be the value (one of the - values if there are multiple) set for the parameter ozone.om.service.ids. - - Unique identifiers for each OM Node, delimited by commas. This will be - used by OzoneManagers in HA setup to determine all the OzoneManagers - belonging to the same OMservice in the cluster. 
For example, if you - used “omService1” as the OM service ID previously, and you wanted to - use “om1”, “om2” and "om3" as the individual IDs of the OzoneManagers, - you would configure a property ozone.om.nodes.omService1, and its value - "om1,om2,om3". - - - - ozone.om.node.id - - OM, HA - - The ID of this OM node. If the OM node ID is not configured it - is determined automatically by matching the local node's address - with the configured address. - - If node ID is not deterministic from the configuration, then it is set - to the OmId from the OM version file. - - - - ozone.om.address - 0.0.0.0:9862 - OM, REQUIRED - - The address of the Ozone OM service. This allows clients to discover - the address of the OM. - - - - ozone.om.handler.count.key - 20 - OM, PERFORMANCE - - The number of RPC handler threads for OM service endpoints. - - - - ozone.om.http-address - 0.0.0.0:9874 - OM, MANAGEMENT - - The address and the base port where the OM web UI will listen on. - - If the port is 0, then the server will start on a free port. However, it - is best to specify a well-known port, so it is easy to connect and see - the OM management UI. - - - - ozone.om.http-bind-host - 0.0.0.0 - OM, MANAGEMENT - - The actual address the OM web server will bind to. If this optional - the address is set, it overrides only the hostname portion of - ozone.om.http-address. - - - - ozone.om.http.enabled - true - OM, MANAGEMENT - - Property to enable or disable OM web user interface. - - - - ozone.om.https-address - 0.0.0.0:9875 - OM, MANAGEMENT, SECURITY - - The address and the base port where the OM web UI will listen - on using HTTPS. - If the port is 0 then the server will start on a free port. - - - - ozone.om.https-bind-host - 0.0.0.0 - OM, MANAGEMENT, SECURITY - - The actual address the OM web server will bind to using HTTPS. - If this optional address is set, it overrides only the hostname portion of - ozone.om.https-address. - - - - ozone.om.keytab.file - - OM, SECURITY - - The keytab file for Kerberos authentication in OM. - - - - ozone.om.db.cache.size.mb - 128 - OM, PERFORMANCE - - The size of OM DB cache in MB that used for caching files. - This value is set to an abnormally low value in the default configuration. - That is to make unit testing easy. Generally, this value should be set to - something like 16GB or more, if you intend to use Ozone at scale. - - A large value for this key allows a proportionally larger amount of OM - metadata to be cached in memory. This makes OM operations faster. - - - - ozone.om.user.max.volume - 1024 - OM, MANAGEMENT - - The maximum number of volumes a user can have on a cluster.Increasing or - decreasing this number has no real impact on ozone cluster. This is - defined only for operational purposes. Only an administrator can create a - volume, once a volume is created there are no restrictions on the number - of buckets or keys inside each bucket a user can create. - - - - ozone.om.db.dirs - - OZONE, OM, STORAGE, PERFORMANCE - - Directory where the OzoneManager stores its metadata. This should - be specified as a single directory. If the directory does not - exist then the OM will attempt to create it. - - If undefined, then the OM will log a warning and fallback to - ozone.metadata.dirs. This fallback approach is not recommended for - production environments. - - - - ozone.metadata.dirs - - OZONE, OM, SCM, CONTAINER, STORAGE, REQUIRED - - This setting is the fallback location for SCM, OM, Recon and DataNodes - to store their metadata. 
This setting may be used only in test/PoC - clusters to simplify configuration. - - For production clusters or any time you care about performance, it is - recommended that ozone.om.db.dirs, ozone.scm.db.dirs and - dfs.container.ratis.datanode.storage.dir be configured separately. - - - - ozone.metastore.impl - RocksDB - OZONE, OM, SCM, CONTAINER, STORAGE - - Ozone metadata store implementation. Ozone metadata are well - distributed to multiple services such as ozoneManager, scm. They are stored in - some local key-value databases. This property determines which database - library to use. Supported value is either LevelDB or RocksDB. - - - - - ozone.metastore.rocksdb.statistics - OFF - OZONE, OM, SCM, STORAGE, PERFORMANCE - - The statistics level of the rocksdb store. If you use any value from - org.rocksdb.StatsLevel (eg. ALL or EXCEPT_DETAILED_TIMERS), the rocksdb - statistics will be exposed over JMX bean with the choosed setting. Set - it to OFF to not initialize rocksdb statistics at all. Please note that - collection of statistics could have 5-10% performance penalty. - Check the rocksdb documentation for more details. - - - - ozone.scm.db.dirs - - OZONE, SCM, STORAGE, PERFORMANCE - - Directory where the StorageContainerManager stores its metadata. - This should be specified as a single directory. If the directory - does not exist then the SCM will attempt to create it. - - If undefined, then the SCM will log a warning and fallback to - ozone.metadata.dirs. This fallback approach is not recommended for - production environments. - - - - ozone.scm.block.client.address - - OZONE, SCM - The address of the Ozone SCM block client service. If not - defined value of ozone.scm.client.address is used. - - - - ozone.scm.block.client.bind.host - 0.0.0.0 - OZONE, SCM - - The hostname or IP address used by the SCM block client - endpoint to bind. - - - - ozone.scm.block.client.port - 9863 - OZONE, SCM - - The port number of the Ozone SCM block client service. - - - - ozone.scm.block.deletion.max.retry - 4096 - OZONE, SCM - - SCM wraps up many blocks in a deletion transaction and sends that to data - node for physical deletion periodically. This property determines how many - times SCM is going to retry sending a deletion operation to the data node. - - - - ozone.scm.block.size - 256MB - OZONE, SCM - - The default size of a scm block. This is maps to the default - Ozone block size. - - - - ozone.scm.chunk.size - 16MB - OZONE, SCM, CONTAINER, PERFORMANCE - - The chunk size for reading/writing chunk operations in bytes. - - The chunk size defaults to 8MB. If the value configured is more than the - maximum size (16MB), it will be reset to the maximum size. This maps to - the network packet sizes and file write operations in the client to - datanode protocol. - - - - ozone.scm.client.address - - OZONE, SCM, REQUIRED - - The address of the Ozone SCM client service. This is a required setting. - - It is a string in the host:port format. The port number is optional - and defaults to 9860. - - - - ozone.scm.client.bind.host - 0.0.0.0 - OZONE, SCM, MANAGEMENT - The hostname or IP address used by the SCM client endpoint to - bind. - This setting is used by the SCM only and never used by clients. - - The setting can be useful in multi-homed setups to restrict the - availability of the SCM client service to a specific interface. - - The default is appropriate for most clusters. - - - - ozone.scm.client.port - 9860 - OZONE, SCM, MANAGEMENT - The port number of the Ozone SCM client service. 
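To make the OM HA wiring described above concrete, here is a hedged sketch that sets the service id and node list exactly as the ozone.om.nodes.EXAMPLEOMSERVICEID description suggests ("omService1" with nodes om1, om2, om3); the hostname used for ozone.om.address is a made-up placeholder.

    import org.apache.hadoop.conf.Configuration;

    public class OmHaConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("ozone.om.service.ids", "omService1");
        conf.set("ozone.om.nodes.omService1", "om1,om2,om3");
        // Base OM RPC address; per-node address keys are left out of this sketch.
        conf.set("ozone.om.address", "om1.example.com:9862");

        for (String nodeId : conf.getTrimmedStrings("ozone.om.nodes.omService1")) {
          System.out.println("OM node id: " + nodeId);
        }
      }
    }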
- - - ozone.scm.keyvalue.container.deletion-choosing.policy - - org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy - - OZONE, MANAGEMENT - - The policy used for choosing desired keyvalue containers for block deletion. - Datanode selects some containers to process block deletion - in a certain interval defined by ozone.block.deleting.service.interval. - The number of containers to process in each interval is defined - by ozone.block.deleting.container.limit.per.interval. This property is - used to configure the policy applied while selecting containers. - There are two policies supporting now: - RandomContainerDeletionChoosingPolicy and - TopNOrderedContainerDeletionChoosingPolicy. - org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy - implements a simply random policy that to return a random list of - containers. - org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy - implements a policy that choosing top count number of containers in a - pending-deletion-blocks's num - based descending order. - - - - ozone.scm.container.placement.impl - - org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom - - OZONE, MANAGEMENT - - The full name of class which implements org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy. - The class decides which datanode will be used to host the container replica. If not set, - org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom will be used as default value. - - - - ozone.scm.pipeline.owner.container.count - 3 - OZONE, SCM, PIPELINE - Number of containers per owner in a pipeline. - - - - ozone.scm.container.size - 5GB - OZONE, PERFORMANCE, MANAGEMENT - - Default container size used by Ozone. - There are two considerations while picking this number. The speed at which - a container can be replicated, determined by the network speed and the - metadata that each container generates. So selecting a large number - creates less SCM metadata, but recovery time will be more. 5GB is a number - that maps to quick replication times in gigabit networks, but still - balances the amount of metadata. - - - - ozone.scm.datanode.address - - OZONE, MANAGEMENT - - The address of the Ozone SCM service used for internal - communication between the DataNodes and the SCM. - - It is a string in the host:port format. The port number is optional - and defaults to 9861. - - This setting is optional. If unspecified then the hostname portion - is picked from the ozone.scm.client.address setting and the - default service port of 9861 is chosen. - - - - ozone.scm.datanode.bind.host - - OZONE, MANAGEMENT - - The hostname or IP address used by the SCM service endpoint to - bind. - - - - ozone.scm.datanode.id.dir - - OZONE, MANAGEMENT - The path that datanodes will use to store the datanode ID. - If this value is not set, then datanode ID is created under the - metadata directory. - - - - ozone.scm.datanode.port - 9861 - OZONE, MANAGEMENT - - The port number of the Ozone SCM service. - - - - ozone.scm.db.cache.size.mb - 128 - OZONE, PERFORMANCE - SCM keeps track of the Containers in the cluster. This DB holds - the container metadata. This value is set to a small value to make the - unit - testing runs smooth. In production, we recommend a value of 16GB or - higher. This allows SCM to avoid disk I/O's while looking up the container - location. 
- - - - ozone.scm.dead.node.interval - 10m - OZONE, MANAGEMENT - - The interval between heartbeats before a node is tagged as dead. - - - - ozone.scm.handler.count.key - 10 - OZONE, MANAGEMENT, PERFORMANCE - - The number of RPC handler threads for each SCM service - endpoint. - - The default is appropriate for small clusters (tens of nodes). - - Set a value that is appropriate for the cluster size. Generally, HDFS - recommends RPC handler count is set to 20 * log2(Cluster Size) with an - upper limit of 200. However, SCM will not have the same amount of - traffic as Namenode, so a value much smaller than that will work well too. - - - - hdds.heartbeat.interval - 30s - OZONE, MANAGEMENT - - The heartbeat interval from a data node to SCM. Yes, - it is not three but 30, since most data nodes will heart beating via Ratis - heartbeats. If a client is not able to talk to a data node, it will notify - OM/SCM eventually. So a 30 second HB seems to work. This assumes that - replication strategy used is Ratis if not, this value should be set to - something smaller like 3 seconds. - ozone.scm.pipeline.close.timeout should also be adjusted accordingly, - if the default value for this config is not used. - - - - ozone.scm.heartbeat.log.warn.interval.count - 10 - OZONE, MANAGEMENT - - Defines how frequently we will log the missing of a heartbeat to SCM. - For example in the default case, we will write a warning message for each - ten consecutive heartbeats that we miss to SCM. This helps in reducing - clutter in a data node log, but trade off is that logs will have less of - this statement. - - - - ozone.scm.heartbeat.rpc-timeout - 1s - OZONE, MANAGEMENT - - Timeout value for the RPC from Datanode to SCM. - - - - ozone.scm.heartbeat.thread.interval - 3s - OZONE, MANAGEMENT - - When a heartbeat from the data node arrives on SCM, It is queued for - processing with the time stamp of when the heartbeat arrived. There is a - heartbeat processing thread inside SCM that runs at a specified interval. - This value controls how frequently this thread is run. - - There are some assumptions build into SCM such as this value should allow - the heartbeat processing thread to run at least three times more - frequently than heartbeats and at least five times more than stale node - detection time. If you specify a wrong value, SCM will gracefully refuse - to run. For more info look at the node manager tests in SCM. - - In short, you don't need to change this. - - - - ozone.scm.http-address - 0.0.0.0:9876 - OZONE, MANAGEMENT - - The address and the base port where the SCM web ui will listen on. - - If the port is 0 then the server will start on a free port. - - - - ozone.scm.http-bind-host - 0.0.0.0 - OZONE, MANAGEMENT - - The actual address the SCM web server will bind to. If this - optional address is set, it overrides only the hostname portion of - ozone.scm.http-address. - - - - ozone.scm.http.enabled - true - OZONE, MANAGEMENT - - Property to enable or disable SCM web ui. - - - - ozone.scm.https-address - 0.0.0.0:9877 - OZONE, MANAGEMENT - - The address and the base port where the SCM web UI will listen - on using HTTPS. - - If the port is 0 then the server will start on a free port. - - - - ozone.scm.https-bind-host - 0.0.0.0 - OZONE, MANAGEMENT - - The actual address the SCM web server will bind to using HTTPS. - If this optional address is set, it overrides only the hostname portion of - ozone.scm.https-address. 
- - - - ozone.scm.names - - OZONE, REQUIRED - - The value of this property is a set of DNS | DNS:PORT | IP - Address | IP:PORT. Written as a comma separated string. e.g. scm1, - scm2:8020, 7.7.7.7:7777. - This property allows datanodes to discover where SCM is, so that - datanodes can send heartbeat to SCM. - - - - ozone.scm.stale.node.interval - 5m - OZONE, MANAGEMENT - - The interval for stale node flagging. Please - see ozone.scm.heartbeat.thread.interval before changing this value. - - - - ozone.trace.enabled - false - OZONE, DEBUG - - Setting this flag to true dumps the HTTP request/ response in - the logs. Very useful when debugging REST protocol. - - - - - ozone.scm.container.creation.lease.timeout - 60s - OZONE, SCM - - Container creation timeout in milliseconds to be used by SCM. When - BEGIN_CREATE event happens the container is moved from ALLOCATED to - CREATING state, SCM will now wait for the configured amount of time - to get COMPLETE_CREATE event if it doesn't receive it will move the - container to DELETING. - - - - - ozone.key.preallocation.max.blocks - 64 - OZONE, OM, PERFORMANCE - - While allocating blocks from OM, this configuration limits the maximum - number of blocks being allocated. This configuration ensures that the - allocated block response do not exceed rpc payload limit. If client needs - more space for the write, separate block allocation requests will be made. - - - - - ozone.client.list.cache - 1000 - OZONE, PERFORMANCE - - Configuration property to configure the cache size of client list calls. - - - - - ozone.replication - 3 - OZONE, CLIENT - - Default replication value. The actual number of replications can be - specified when writing the key. The default is used if replication - is not specified. Supported values: 1 and 3. - - - - - ozone.replication.type - RATIS - OZONE, CLIENT - - Default replication type to be used while writing key into ozone. The - value can be specified when writing the key, default is used when - nothing is specified. Supported values: RATIS, STAND_ALONE and CHAINED. - - - - hdds.container.close.threshold - 0.9f - OZONE, DATANODE - - This determines the threshold to be used for closing a container. - When the container used percentage reaches this threshold, - the container will be closed. Value should be a positive, non-zero - percentage in float notation (X.Yf), with 1.0f meaning 100%. - - - - ozone.rest.client.http.connection.max - 100 - OZONE, CLIENT - - This defines the overall connection limit for the connection pool used in - RestClient. - - - - ozone.rest.client.http.connection.per-route.max - 20 - OZONE, CLIENT - - This defines the connection limit per one HTTP route/host. Total max - connection is limited by ozone.rest.client.http.connection.max property. - - - - - ozone.open.key.cleanup.service.interval.seconds - 86400 - OZONE, OM, PERFORMANCE - - A background job periodically checks open key entries and delete the expired ones. This entry controls the - interval of this cleanup check. - - - - - ozone.open.key.expire.threshold - 86400 - OZONE, OM, PERFORMANCE - - Controls how long an open key operation is considered active. Specifically, if a key - has been open longer than the value of this config entry, that open key is considered as - expired (e.g. due to client crash). Default to 24 hours. 
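As a quick consistency check of the defaults above: with hdds.heartbeat.interval = 30s, ozone.scm.heartbeat.thread.interval = 3s and ozone.scm.stale.node.interval = 5m, the heartbeat processing thread runs ten times per heartbeat and a hundred times per stale-node window, comfortably meeting the "at least three times more frequently than heartbeats and at least five times more than stale node detection" guidance in the ozone.scm.heartbeat.thread.interval description; the 10m ozone.scm.dead.node.interval then leaves a further five minutes between a node going stale and being declared dead.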
- - - - - hadoop.tags.custom - OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM, - CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,RECON - - - - ozone.tags.system - OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM, - CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON - - - - - hdds.rest.rest-csrf.enabled - false - - If true, then enables Object Store REST server protection against - cross-site request forgery (CSRF). - - - - - hdds.rest.http-address - 0.0.0.0:9880 - The http address of Object Store REST server inside the - datanode. - - - - - hdds.rest.netty.high.watermark - 65535 - - High watermark configuration to Netty for Object Store REST server. - - - - - hdds.rest.netty.low.watermark - 32768 - - Low watermark configuration to Netty for Object Store REST server. - - - - - hdds.datanode.plugins - - - Comma-separated list of HDDS datanode plug-ins to be activated when - HDDS service starts as part of datanode. - - - - hdds.datanode.storage.utilization.warning.threshold - 0.75 - OZONE, SCM, MANAGEMENT - - If a datanode overall storage utilization exceeds more than this - value, a warning will be logged while processing the nodeReport in SCM. - - - - - hdds.datanode.storage.utilization.critical.threshold - 0.95 - OZONE, SCM, MANAGEMENT - - If a datanode overall storage utilization exceeds more than this - value, the datanode will be marked out of space. - - - - - hdds.command.status.report.interval - 30s - OZONE, DATANODE, MANAGEMENT - Time interval of the datanode to send status of commands - executed since last report. Unit could be defined with - postfix (ns,ms,s,m,h,d) - - - ozone.scm.pipeline.destroy.timeout - 66s - OZONE, SCM, PIPELINE - - Once a pipeline is closed, SCM should wait for the above configured time - before destroying a pipeline. - - - - ozone.scm.pipeline.creation.interval - 120s - OZONE, SCM, PIPELINE - - SCM schedules a fixed interval job using the configured interval to - create pipelines. - - - - - hdds.scm.safemode.threshold.pct - 0.99 - HDDS,SCM,OPERATION - % of containers which should have at least one - reported replica before SCM comes out of safe mode. - - - - - hdds.scm.wait.time.after.safemode.exit - 5m - HDDS,SCM,OPERATION - After exiting safemode, wait for configured interval of - time to start replication monitor and cleanup activities of unhealthy - pipelines. - - - - - hdds.scm.safemode.enabled - true - HDDS,SCM,OPERATION - Boolean value to enable or disable SCM safe mode. - - - - - hdds.scm.safemode.min.datanode - 1 - HDDS,SCM,OPERATION - Minimum DataNodes which should be registered to get SCM out of - safe mode. - - - - - hdds.scm.safemode.pipeline-availability.check - false - HDDS,SCM,OPERATION - - Boolean value to enable pipeline availability check during SCM safe mode. - - - - - hdds.scm.safemode.healthy.pipelie.pct - 0.10 - HDDS,SCM,OPERATION - - Percentage of healthy pipelines, where all 3 datanodes are reported in the - pipeline. - - - - - hdds.scm.safemode.atleast.one.node.reported.pipeline.pct - 0.90 - HDDS,SCM,OPERATION - - Percentage of pipelines, where at least one datanode is reported in the - pipeline. - - - - - hdds.container.scrub.enabled - false - DATANODE - - Boolean value to enable data and metadata scrubbing in the containers - running on each datanode. - - - - - hdds.container.action.max.limit - 20 - DATANODE - - Maximum number of Container Actions sent by the datanode to SCM in a - single heartbeat. 
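To put the safe-mode thresholds above in concrete terms: on a cluster tracking 1,000 containers with the listed defaults, SCM exits safe mode only once roughly 990 containers (hdds.scm.safemode.threshold.pct = 0.99) have at least one reported replica and at least one datanode has registered (hdds.scm.safemode.min.datanode = 1); if hdds.scm.safemode.pipeline-availability.check is turned on, 10% of pipelines must additionally report all three datanodes and 90% at least one, and replication monitoring and cleanup start only after the 5m hdds.scm.wait.time.after.safemode.exit delay.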
- - - - - hdds.pipeline.action.max.limit - 20 - DATANODE - - Maximum number of Pipeline Actions sent by the datanode to SCM in a - single heartbeat. - - - - hdds.scm.watcher.timeout - 10m - OZONE, SCM, MANAGEMENT - - Timeout for the watchers of the HDDS SCM CommandWatchers. After this - duration the Copy/Delete container commands will be sent again to the - datanode unless the datanode confirms the completion. - - - - - hdds.db.profile - DISK - OZONE, OM, PERFORMANCE - This property allows user to pick a configuration - that tunes the RocksDB settings for the hardware it is running - on. Right now, we have SSD and DISK as profile options. - - - - hdds.datanode.replication.work.dir - DATANODE - Temporary which is used during the container replication - betweeen datanodes. Should have enough space to store multiple container - (in compressed format), but doesn't require fast io access such as SSD. - - - - - hdds.lock.max.concurrency - 100 - HDDS - Locks in HDDS/Ozone uses object pool to maintain active locks - in the system, this property defines the max limit for the locks that - will be maintained in the pool. - - - - - ozone.s3g.authentication.kerberos.principal - - OZONE, S3GATEWAY - The server principal used by Ozone S3Gateway server. This is - typically set to - HTTP/_HOST@REALM.TLD The SPNEGO server principal begins with the prefix - HTTP/ by convention. - - - - ozone.s3g.domain.name - - OZONE, S3GATEWAY - List of Ozone S3Gateway domain names. If multiple - domain names to be provided, they should be a "," seperated. - This parameter is only required when virtual host style pattern is - followed. - - - - ozone.s3g.http-address - 0.0.0.0:9878 - OZONE, S3GATEWAY - The address and the base port where the Ozone S3Gateway - Server will - listen on. - - - - ozone.s3g.http-bind-host - 0.0.0.0 - OZONE, S3GATEWAY - The actual address the HTTP server will bind to. If this optional address - is set, it overrides only the hostname portion of ozone.s3g.http-address. - This is useful for making the Ozone S3Gateway HTTP server listen on all - interfaces by setting it to 0.0.0.0. - - - - ozone.s3g.http.enabled - true - OZONE, S3GATEWAY - The boolean which enables the Ozone S3Gateway server - . - - - - ozone.s3g.https-address - - OZONE, S3GATEWAY - Ozone S3Gateway serverHTTPS server address and port - . - - - - ozone.s3g.https-bind-host - - OZONE, S3GATEWAY - The actual address the HTTPS server will bind to. If this optional address - is set, it overrides only the hostname portion of ozone.s3g.https-address. - This is useful for making the Ozone S3Gateway HTTPS server listen on all - interfaces by setting it to 0.0.0.0. - - - - ozone.s3g.keytab.file - - OZONE, S3GATEWAY - The keytab file used by the S3Gateway server to login as its - service principal. - - - - ozone.om.save.metrics.interval - 5m - OZONE, OM - Time interval used to store the omMetrics in to a - file. Background thread periodically stores the OM metrics in to a - file. Unit could be defined with postfix (ns,ms,s,m,h,d) - - - - ozone.security.enabled - false - OZONE, SECURITY - True if security is enabled for ozone. When this property is - true, hadoop.security.authentication should be Kerberos. - - - - - ozone.client.checksum.type - CRC32 - OZONE, CLIENT, MANAGEMENT - The checksum type [NONE/ CRC32/ CRC32C/ SHA256/ MD5] determines - which algorithm would be used to compute checksum for chunk data. - Default checksum type is SHA256. 
- - - - - ozone.client.bytes.per.checksum - 1MB - OZONE, CLIENT, MANAGEMENT - Checksum will be computed for every bytes per checksum number - of bytes and stored sequentially. The minimum value for this config is - 256KB. - - - - - ozone.client.verify.checksum - true - OZONE, CLIENT, MANAGEMENT - - Ozone client to verify checksum of the checksum blocksize data. - - - - - ozone.om.lock.fair - false - If this is true, the Ozone Manager lock will be used in Fair - mode, which will schedule threads in the order received/queued. If this is - false, uses non-fair ordering. See - java.util.concurrent.locks.ReentrantReadWriteLock - for more information on fair/non-fair locks. - - - - - ozone.om.ratis.enable - false - OZONE, OM, RATIS, MANAGEMENT - Property to enable or disable Ratis server on OM. - Please note - this is a temporary property to disable OM Ratis server. - - - - - ozone.om.ratis.port - 9872 - OZONE, OM, RATIS - - The port number of the OzoneManager's Ratis server. - - - - - ozone.om.ratis.rpc.type - GRPC - OZONE, OM, RATIS, MANAGEMENT - Ratis supports different kinds of transports like netty, GRPC, - Hadoop RPC etc. This picks one of those for this cluster. - - - - - ozone.om.ratis.storage.dir - - OZONE, OM, STORAGE, MANAGEMENT, RATIS - This directory is used for storing OM's Ratis metadata like - logs. If this is not set then default metadata dirs is used. A warning - will be logged if this not set. Ideally, this should be mapped to a - fast disk like an SSD. - If undefined, OM ratis storage dir will fallback to ozone.metadata.dirs. - This fallback approach is not recommended for production environments. - - - - - ozone.om.ratis.segment.size - 16KB - OZONE, OM, RATIS, PERFORMANCE - The size of the raft segment used by Apache Ratis on OM. - (16 KB by default) - - - - - ozone.om.ratis.segment.preallocated.size - 16KB - OZONE, OM, RATIS, PERFORMANCE - The size of the buffer which is preallocated for raft segment - used by Apache Ratis on OM.(16 KB by default) - - - - - ozone.om.ratis.log.appender.queue.num-elements - 1024 - OZONE, DEBUG, OM, RATIS - Number of operation pending with Raft's Log Worker. - - - - ozone.om.ratis.log.appender.queue.byte-limit - 32MB - OZONE, DEBUG, OM, RATIS - Byte limit for Raft's Log Worker queue. - - - - ozone.om.ratis.log.purge.gap - 1000000 - OZONE, OM, RATIS - The minimum gap between log indices for Raft server to purge - its log segments after taking snapshot. - - - - - ozone.om.ratis.snapshot.auto.trigger.threshold - 400000 - OZONE, DEBUG, OM, RATIS - The log index threshold after ratis will auto trigger - snapshot on the OM state machine. - - - - - ozone.om.ratis.server.request.timeout - 3s - OZONE, OM, RATIS, MANAGEMENT - The timeout duration for OM's ratis server request . - - - - ozone.om.ratis.server.retry.cache.timeout - 600000ms - OZONE, OM, RATIS, MANAGEMENT - Retry Cache entry timeout for OM's ratis server. - - - - ozone.om.ratis.minimum.timeout - 1s - OZONE, OM, RATIS, MANAGEMENT - The minimum timeout duration for OM's Ratis server rpc. - - - - - ozone.om.ratis.client.request.timeout.duration - 3s - OZONE, OM, RATIS, MANAGEMENT - The timeout duration for OM Ratis client request. - - - - ozone.om.ratis.client.request.max.retries - 180 - OZONE, OM, RATIS, MANAGEMENT - Number of retries for OM client request. - - - ozone.om.ratis.client.request.retry.interval - 100ms - OZONE, OM, RATIS, MANAGEMENT - Interval between successive retries for a OM client request. 
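A small worked example for the client checksum settings above: with the default ozone.client.bytes.per.checksum of 1MB, a 16MB chunk (the listed ozone.scm.chunk.size) is covered by sixteen checksums of the configured ozone.client.checksum.type, computed and stored sequentially as the description states; the documented minimum for bytes-per-checksum is 256KB, which for the same chunk size would mean 64 checksums.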
- - - - - ozone.om.leader.election.minimum.timeout.duration - 1s - OZONE, OM, RATIS, MANAGEMENT - The minimum timeout duration for OM ratis leader election. - Default is 1s. - - - - - ozone.om.ratis.server.failure.timeout.duration - 120s - OZONE, OM, RATIS, MANAGEMENT - The timeout duration for ratis server failure detection, - once the threshold has reached, the ratis state machine will be informed - about the failure in the ratis ring. - - - - - ozone.om.ratis.server.role.check.interval - 15s - OZONE, OM, RATIS, MANAGEMENT - The interval between OM leader performing a role - check on its ratis server. Ratis server informs OM if it - loses the leader role. The scheduled check is an secondary - check to ensure that the leader role is updated periodically - . - - - - ozone.om.ratis.snapshot.dir - - OZONE, OM, STORAGE, MANAGEMENT, RATIS - This directory is used for storing OM's snapshot - related files like the ratisSnapshotIndex and DB checkpoint from leader - OM. - If undefined, OM snapshot dir will fallback to ozone.om.ratis.storage.dir. - This fallback approach is not recommended for production environments. - - - - ozone.om.snapshot.provider.socket.timeout - 5000s - OZONE, OM, HA, MANAGEMENT - - Socket timeout for HTTP call made by OM Snapshot Provider to request - OM snapshot from OM Leader. - - - - ozone.om.snapshot.provider.connection.timeout - 5000s - OZONE, OM, HA, MANAGEMENT - - Connection timeout for HTTP call made by OM Snapshot Provider to request - OM snapshot from OM Leader. - - - - ozone.om.snapshot.provider.request.timeout - 5000ms - OZONE, OM, HA, MANAGEMENT - - Connection request timeout for HTTP call made by OM Snapshot Provider to - request OM snapshot from OM Leader. - - - - - ozone.acl.authorizer.class - org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer - OZONE, SECURITY, ACL - Acl authorizer for Ozone. - - - - ozone.acl.enabled - false - OZONE, SECURITY, ACL - Key to enable/disable ozone acls. - - - hdds.scm.kerberos.keytab.file - - OZONE, SECURITY - The keytab file used by each SCM daemon to login as its - service principal. The principal name is configured with - hdds.scm.kerberos.principal. - - - - hdds.scm.kerberos.principal - - OZONE, SECURITY - The SCM service principal. Ex scm/_HOST@REALM.COM - - - - ozone.om.kerberos.keytab.file - - OZONE, SECURITY - The keytab file used by OzoneManager daemon to login as its - service principal. The principal name is configured with - ozone.om.kerberos.principal. - - - - ozone.om.kerberos.principal - - OZONE, SECURITY - The OzoneManager service principal. Ex om/_HOST@REALM.COM - - - - hdds.scm.http.kerberos.principal - HTTP/_HOST@EXAMPLE.COM - - - hdds.scm.http.kerberos.keytab - /etc/security/keytabs/HTTP.keytab - - - - ozone.om.http.kerberos.principal - HTTP/_HOST@EXAMPLE.COM - - OzoneManager http server kerberos principal. - - - - ozone.om.http.kerberos.keytab - /etc/security/keytabs/HTTP.keytab - - OzoneManager http server kerberos keytab. - - - - hdds.key.len - 2048 - SCM, HDDS, X509, SECURITY - - SCM CA key length. This is an algorithm-specific metric, such as modulus length, specified in number of bits. - - - - hdds.key.dir.name - keys - SCM, HDDS, X509, SECURITY - - Directory to store public/private key for SCM CA. This is relative to ozone/hdds meteadata dir. - - - - hdds.block.token.expiry.time - 1d - OZONE, HDDS, SECURITY, TOKEN - - Default value for expiry time of block token. This - setting supports multiple time unit suffixes as described in - dfs.heartbeat.interval. 
If no suffix is specified, then milliseconds is - assumed. - - - - - hdds.block.token.enabled - false - OZONE, HDDS, SECURITY, TOKEN - True if block tokens are enabled, else false. - - - hdds.x509.file.name - certificate.crt - OZONE, HDDS, SECURITY - Certificate file name. - - - hdds.grpc.tls.provider - OPENSSL - OZONE, HDDS, SECURITY, TLS - HDDS GRPC server TLS provider. - - - hdds.grpc.tls.enabled - false - OZONE, HDDS, SECURITY, TLS - If HDDS GRPC server TLS is enabled. - - - hdds.x509.default.duration - P365D - OZONE, HDDS, SECURITY - Default duration for which x509 certificates issued by SCM are - valid. The formats accepted are based on the ISO-8601 duration format - PnDTnHnMn.nS - - - hdds.x509.dir.name - certs - OZONE, HDDS, SECURITY - X509 certificate directory name. - - - hdds.x509.max.duration - P1865D - OZONE, HDDS, SECURITY - Max time for which certificate issued by SCM CA are valid. - . The formats accepted are based on the ISO-8601 duration format - PnDTnHnMn.nS - - - hdds.x509.signature.algorithm - SHA256withRSA - OZONE, HDDS, SECURITY - X509 signature certificate. - - - ozone.scm.security.handler.count.key - 2 - OZONE, HDDS, SECURITY - Threads configured for SCMSecurityProtocolServer. - - - ozone.scm.security.service.address - - OZONE, HDDS, SECURITY - Address of SCMSecurityProtocolServer. - - - ozone.scm.security.service.bind.host - 0.0.0.0 - OZONE, HDDS, SECURITY - SCM security server host. - - - ozone.scm.security.service.port - 9961 - OZONE, HDDS, SECURITY - SCM security server port. - - - - hdds.metadata.dir - - X509, SECURITY - - Absolute path to HDDS metadata dir. - - - - hdds.priv.key.file.name - private.pem - X509, SECURITY - - Name of file which stores private key generated for SCM CA. - - - - hdds.public.key.file.name - public.pem - X509, SECURITY - - Name of file which stores public key generated for SCM CA. - - - - ozone.manager.delegation.remover.scan.interval - 3600000 - - Time interval after which ozone secret manger scans for expired - delegation token. - - - - ozone.manager.delegation.token.renew-interval - 1d - - Default time interval after which ozone delegation token will - require renewal before any further use. - - - - ozone.manager.delegation.token.max-lifetime - 7d - - Default max time interval after which ozone delegation token will - not be renewed. - - - - - ozone.fs.isolated-classloader - - OZONE, OZONEFS - - Enable it for older hadoops to separate the classloading of all the - Ozone classes. With 'true' value, ozonefs can be used with older - hadoop versions as the hadoop3/ozone related classes are loaded by - an isolated classloader. - - Default depends from the used jar. true for ozone-filesystem-lib-legacy - jar and false for the ozone-filesystem-lib-current.jar - - - - ozone.manager.db.checkpoint.transfer.bandwidthPerSec - 0 - OZONE - - Maximum bandwidth used for Ozone Manager DB checkpoint download through - the servlet. - - - - - ozone.freon.http-address - 0.0.0.0:9884 - OZONE, MANAGEMENT - - The address and the base port where the FREON web ui will listen on. - If the port is 0 then the server will start on a free port. - - - - ozone.freon.http-bind-host - 0.0.0.0 - OZONE, MANAGEMENT - - The actual address the Freon web server will bind to. If this - optional address is set, it overrides only the hostname portion of - ozone.freon.http-address. - - - - ozone.freon.http.enabled - true - OZONE, MANAGEMENT - - Property to enable or disable FREON web ui. 
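The certificate-duration properties above (hdds.x509.default.duration = P365D, hdds.x509.max.duration = P1865D) use the ISO-8601 PnDTnHnMn.nS form. The snippet below parses them with java.time.Duration purely to illustrate the accepted format, without implying that SCM itself parses the values this way.

    import java.time.Duration;

    public class CertDurationSketch {
      public static void main(String[] args) {
        Duration defaultValidity = Duration.parse("P365D");  // hdds.x509.default.duration
        Duration maxValidity = Duration.parse("P1865D");     // hdds.x509.max.duration

        System.out.println(defaultValidity.toDays() + " days (default), "
            + maxValidity.toDays() + " days (max)");
      }
    }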
- - - - ozone.freon.https-address - 0.0.0.0:9885 - OZONE, MANAGEMENT - - The address and the base port where the Freon web server will listen - on using HTTPS. - If the port is 0 then the server will start on a free port. - - - - ozone.freon.https-bind-host - 0.0.0.0 - OZONE, MANAGEMENT - - The actual address the Freon web server will bind to using HTTPS. - If this optional address is set, it overrides only the hostname portion of - ozone.freon.http-address. - - - - ozone.freon.http.kerberos.principal - HTTP/_HOST@EXAMPLE.COM - SECURITY - - Security principal used by freon. - - - - ozone.freon.http.kerberos.keytab - /etc/security/keytabs/HTTP.keytab - SECURITY - - Keytab used by Freon. - - - - hdds.security.client.datanode.container.protocol.acl - * - SECURITY - - Comma separated list of users and groups allowed to access - client datanode container protocol. - - - - hdds.security.client.scm.block.protocol.acl - * - SECURITY - - Comma separated list of users and groups allowed to access - client scm block protocol. - - - - hdds.security.client.scm.certificate.protocol.acl - * - SECURITY - - Comma separated list of users and groups allowed to access - client scm certificate protocol. - - - - hdds.security.client.scm.container.protocol.acl - * - SECURITY - - Comma separated list of users and groups allowed to access - client scm container protocol. - - - - ozone.om.security.client.protocol.acl - * - SECURITY - - Comma separated list of users and groups allowed to access - client ozone manager protocol. - - - - - hdds.datanode.http.kerberos.principal - HTTP/_HOST@EXAMPLE.COM - HDDS, SECURITY, MANAGEMENT - - The kerberos principal for the datanode http server. - - - - hdds.datanode.http.kerberos.keytab - /etc/security/keytabs/HTTP.keytab - HDDS, SECURITY, MANAGEMENT - - The kerberos keytab file for datanode http server - - - - hdds.datanode.http-address - 0.0.0.0:9882 - HDDS, MANAGEMENT - - The address and the base port where the Datanode web ui will listen on. - If the port is 0 then the server will start on a free port. - - - - hdds.datanode.http-bind-host - 0.0.0.0 - HDDS, MANAGEMENT - - The actual address the Datanode web server will bind to. If this - optional address is set, it overrides only the hostname portion of - hdds.datanode.http-address. - - - - hdds.datanode.http.enabled - true - HDDS, MANAGEMENT - - Property to enable or disable Datanode web ui. - - - - hdds.datanode.https-address - 0.0.0.0:9883 - HDDS, MANAGEMENT, SECURITY - - The address and the base port where the Datanode web UI will listen - on using HTTPS. - - If the port is 0 then the server will start on a free port. - - - - hdds.datanode.https-bind-host - 0.0.0.0 - HDDS, MANAGEMENT, SECURITY - - The actual address the Datanode web server will bind to using HTTPS. - If this optional address is set, it overrides only the hostname portion of - hdds.datanode.http-address. - - - - ozone.client.retry.max.attempts - 10 - - Max retry attempts for Ozone RpcClient talking to OzoneManagers. - - - - ozone.client.failover.max.attempts - 15 - - Expert only. The number of client failover attempts that should be - made before the failover is considered failed. - - - - ozone.client.failover.sleep.base.millis - 500 - - Expert only. The time to wait, in milliseconds, between failover - attempts increases exponentially as a function of the number of - attempts made so far, with a random factor of +/- 50%. This option - specifies the base value used in the failover calculation. The - first failover will retry immediately. 
The 2nd failover attempt - will delay at least ozone.client.failover.sleep.base.millis - milliseconds. And so on. - - - - ozone.client.failover.sleep.max.millis - 15000 - - Expert only. The time to wait, in milliseconds, between failover - attempts increases exponentially as a function of the number of - attempts made so far, with a random factor of +/- 50%. This option - specifies the maximum value to wait between failovers. - Specifically, the time between two failover attempts will not - exceed +/- 50% of ozone.client.failover.sleep.max.millis - milliseconds. - - - - ozone.recon.http.enabled - true - RECON, MANAGEMENT - - Property to enable or disable Recon web user interface. - - - - ozone.recon.http-address - 0.0.0.0:9888 - RECON, MANAGEMENT - - The address and the base port where the Recon web UI will listen on. - - If the port is 0, then the server will start on a free port. However, it - is best to specify a well-known port, so it is easy to connect and see - the Recon management UI. - - - - ozone.recon.http-bind-host - 0.0.0.0 - RECON, MANAGEMENT - - The actual address the Recon server will bind to. If this optional - the address is set, it overrides only the hostname portion of - ozone.recon.http-address. - - - - ozone.recon.https-bind-host - 0.0.0.0 - RECON, MANAGEMENT, SECURITY - - The actual address the Recon web server will bind to using HTTPS. - If this optional address is set, it overrides only the hostname portion of - ozone.recon.https-address. - - - - ozone.recon.https-address - 0.0.0.0:9889 - RECON, MANAGEMENT, SECURITY - - The address and the base port where the Recon web UI will listen - on using HTTPS. If the port is 0 then the server will start on a free - port. - - - - ozone.recon.keytab.file - - RECON, SECURITY - - The keytab file for Kerberos authentication in Recon. - - - - ozone.recon.authentication.kerberos.principal - - RECON - The server principal used by Ozone Recon server. This is - typically set to HTTP/_HOST@REALM.TLD The SPNEGO server principal - begins with the prefix HTTP/ by convention. - - - - ozone.recon.container.db.cache.size.mb - 128 - RECON, PERFORMANCE - - The size of Recon DB cache in MB that used for caching files. - This value is set to an abnormally low value in the default configuration. - That is to make unit testing easy. Generally, this value should be set to - something like 16GB or more, if you intend to use Recon at scale. - - A large value for this key allows a proportionally larger amount of Recon - container DB to be cached in memory. This makes Recon Container-Key - operations faster. - - - - ozone.recon.db.dir - - OZONE, RECON, STORAGE, PERFORMANCE - - Directory where the Recon Server stores its metadata. This should - be specified as a single directory. If the directory does not - exist then the Recon will attempt to create it. - - If undefined, then the Recon will log a warning and fallback to - ozone.metadata.dirs. This fallback approach is not recommended for - production environments. - - - - ozone.scm.network.topology.schema.file - network-topology-default.xml - OZONE, MANAGEMENT - - The schema file defines the ozone network topology. We currently support - xml(default) and yaml format. Refer to the samples in the topology - awareness document for xml and yaml topology definition samples. - - - - ozone.network.topology.aware.read - false - OZONE, PERFORMANCE - - Whether to enable topology aware read to improve the read performance. 
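The failover sleep behaviour described above (exponential growth from ozone.client.failover.sleep.base.millis, capped by ozone.client.failover.sleep.max.millis, with a +/- 50% random factor and an immediate first retry) can be summarised by the rough model below. This is only an illustration of how the two properties interact, not the actual client implementation.

    import java.util.concurrent.ThreadLocalRandom;

    public class FailoverBackoffModel {
      // Rough model: exponential growth from the base, capped at the max,
      // then a +/- 50% random factor, per the property descriptions above.
      static long sleepMillis(int failedAttempts, long baseMillis, long maxMillis) {
        if (failedAttempts == 0) {
          return 0;  // "The first failover will retry immediately."
        }
        double exp = baseMillis * Math.pow(2, failedAttempts - 1);
        double capped = Math.min(exp, maxMillis);
        double jitter = 0.5 + ThreadLocalRandom.current().nextDouble();  // 0.5x .. 1.5x
        return (long) (capped * jitter);
      }

      public static void main(String[] args) {
        for (int attempt = 0; attempt <= 5; attempt++) {
          System.out.println("attempt " + attempt + ": ~"
              + sleepMillis(attempt, 500, 15000) + " ms");
        }
      }
    }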
- - - - ozone.recon.container.db.impl - RocksDB - OZONE, RECON, STORAGE - - Ozone Recon container DB store implementation.Supported value is either - LevelDB or RocksDB. - - - - ozone.recon.om.db.dir - - OZONE, RECON, STORAGE - - Directory where the Recon Server stores its OM snapshot DB. This should - be specified as a single directory. If the directory does not - exist then the Recon will attempt to create it. - - If undefined, then the Recon will log a warning and fallback to - ozone.metadata.dirs. This fallback approach is not recommended for - production environments. - - - - recon.om.connection.request.timeout - 5000 - OZONE, RECON, OM - - Connection request timeout in milliseconds for HTTP call made by Recon to - request OM DB snapshot. - - - - recon.om.connection.timeout - 5s - OZONE, RECON, OM - - Connection timeout for HTTP call in milliseconds made by Recon to request - OM snapshot. - - - - recon.om.socket.timeout - 5s - OZONE, RECON, OM - - Socket timeout in milliseconds for HTTP call made by Recon to request - OM snapshot. - - - - recon.om.snapshot.task.initial.delay - 1m - OZONE, RECON, OM - - Initial delay in MINUTES by Recon to request OM DB Snapshot. - - - - recon.om.snapshot.task.interval.delay - 10m - OZONE, RECON, OM - - Interval in MINUTES by Recon to request OM DB Snapshot. - - - - recon.om.snapshot.task.flush.param - false - OZONE, RECON, OM - - Request to flush the OM DB before taking checkpoint snapshot. - - - - hdds.tracing.enabled - true - OZONE, HDDS - - If enabled, tracing information is sent to tracing server. - - - - ozone.recon.sql.db.driver - org.sqlite.JDBC - OZONE, RECON - - Database driver class name available on the - Ozone Recon classpath. - - - - ozone.recon.sql.db.jdbc.url - jdbc:sqlite:/${ozone.recon.db.dir}/ozone_recon_sqlite.db - OZONE, RECON - - Ozone Recon SQL database jdbc url. - - - - ozone.recon.sql.db.username - - OZONE, RECON - - Ozone Recon SQL database username. - - - - ozone.recon.sql.db.password - - OZONE, RECON - - Ozone Recon database password. - - - - ozone.recon.sql.db.auto.commit - false - OZONE, RECON - - Sets the Ozone Recon database connection property of auto-commit to - true/false. - - - - ozone.recon.sql.db.conn.timeout - 30000 - OZONE, RECON - - Sets time in milliseconds before call to getConnection is timed out. - - - - ozone.recon.sql.db.conn.max.active - 1 - OZONE, RECON - - The max active connections to the SQL database. The default SQLite - database only allows single active connection, set this to a - reasonable value like 10, for external production database. - - - - ozone.recon.sql.db.conn.max.age - 1800 - OZONE, RECON - - Sets maximum time a connection can be active in seconds. - - - - ozone.recon.sql.db.conn.idle.max.age - 3600 - OZONE, RECON - - Sets maximum time to live for idle connection in seconds. - - - - ozone.recon.sql.db.conn.idle.test.period - 60 - OZONE, RECON - - This sets the time (in seconds), for a connection to remain idle before - sending a test query to the DB. This is useful to prevent a DB from - timing out connections on its end. - - - - ozone.recon.sql.db.conn.idle.test - SELECT 1 - OZONE, RECON - - The query to send to the DB to maintain keep-alives and test for dead - connections. - - - - ozone.recon.task.thread.count - 1 - OZONE, RECON - - The number of Recon Tasks that are waiting on updates from OM. 
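Since Recon ships with an embedded SQLite store by default (a single active connection, auto-commit off), pointing it at an external SQL database mainly means overriding the ozone.recon.sql.db.* keys listed above. The sketch below uses OzoneConfiguration in the same way the configuration tests removed elsewhere in this change do; the driver class, JDBC URL and credentials are placeholders, not values shipped with Ozone:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

/** Hypothetical override of the Recon SQL database settings. */
public final class ReconSqlDbOverrideSketch {

  public static OzoneConfiguration externalDbConf() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Placeholder driver and URL for an external database; the defaults are
    // org.sqlite.JDBC and a local SQLite file under ozone.recon.db.dir.
    conf.set("ozone.recon.sql.db.driver", "org.example.jdbc.Driver");
    conf.set("ozone.recon.sql.db.jdbc.url", "jdbc:example://db-host:5432/recon");
    conf.set("ozone.recon.sql.db.username", "recon");
    conf.set("ozone.recon.sql.db.password", "secret");
    // The SQLite default allows only one active connection; a production
    // database can use a larger pool, e.g. 10.
    conf.setInt("ozone.recon.sql.db.conn.max.active", 10);
    // Milliseconds before a getConnection() call times out.
    conf.setInt("ozone.recon.sql.db.conn.timeout", 30000);
    return conf;
  }
}

The remaining pool-tuning keys (conn.max.age, conn.idle.max.age, conn.idle.test.period, conn.idle.test) follow the same pattern and can be left at their defaults unless the external database times out idle connections aggressively.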
- - - diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java deleted file mode 100644 index 75636106498eb..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds; - -import java.util.Optional; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Testing HddsUtils. - */ -public class TestHddsUtils { - - @Test - public void testGetHostName() { - Assert.assertEquals(Optional.of("localhost"), - HddsUtils.getHostName("localhost:1234")); - - Assert.assertEquals(Optional.of("localhost"), - HddsUtils.getHostName("localhost")); - - Assert.assertEquals(Optional.empty(), - HddsUtils.getHostName(":1234")); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java deleted file mode 100644 index f18fd5e50b6f6..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import java.util.concurrent.TimeUnit; - -/** - * Example configuration to test the configuration injection. - */ -@ConfigGroup(prefix = "ozone.scm.client") -public class SimpleConfiguration { - - private String clientAddress; - - private String bindHost; - - private boolean enabled; - - private int port = 1234; - - private long waitTime = 1; - - @Config(key = "address", defaultValue = "localhost", description = "Just " - + "for testing", tags = ConfigTag.MANAGEMENT) - public void setClientAddress(String clientAddress) { - this.clientAddress = clientAddress; - } - - @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Just " - + "for testing", tags = ConfigTag.MANAGEMENT) - public void setBindHost(String bindHost) { - this.bindHost = bindHost; - } - - @Config(key = "enabled", defaultValue = "true", description = "Just for " - + "testing", tags = ConfigTag.MANAGEMENT) - public void setEnabled(boolean enabled) { - this.enabled = enabled; - } - - @Config(key = "port", defaultValue = "9878", description = "Just for " - + "testing", tags = ConfigTag.MANAGEMENT) - public void setPort(int port) { - this.port = port; - } - - @Config(key = "wait", type = ConfigType.TIME, timeUnit = - TimeUnit.SECONDS, defaultValue = "10m", description = "Just for " - + "testing", tags = ConfigTag.MANAGEMENT) - public void setWaitTime(long waitTime) { - this.waitTime = waitTime; - } - - public String getClientAddress() { - return clientAddress; - } - - public String getBindHost() { - return bindHost; - } - - public boolean isEnabled() { - return enabled; - } - - public int getPort() { - return port; - } - - public long getWaitTime() { - return waitTime; - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java deleted file mode 100644 index 0a8047837aa2b..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.junit.Rule; -import org.junit.Before; -import org.junit.Test; -import org.junit.Assert; -import org.junit.rules.TemporaryFolder; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -/** - * Test class for OzoneConfiguration. - */ -public class TestOzoneConfiguration { - - private Configuration conf; - - @Rule - public TemporaryFolder tempConfigs = new TemporaryFolder(); - - @Before - public void setUp() throws Exception { - conf = new OzoneConfiguration(); - } - - private void startConfig(BufferedWriter out) throws IOException { - out.write("\n"); - out.write("\n"); - } - - private void endConfig(BufferedWriter out) throws IOException { - out.write("\n"); - out.flush(); - out.close(); - } - - @Test - public void testGetAllPropertiesByTags() throws Exception { - File coreDefault = tempConfigs.newFile("core-default-test.xml"); - File coreSite = tempConfigs.newFile("core-site-test.xml"); - try (BufferedWriter out = new BufferedWriter(new FileWriter(coreDefault))) { - startConfig(out); - appendProperty(out, "hadoop.tags.system", "YARN,HDFS,NAMENODE"); - appendProperty(out, "hadoop.tags.custom", "MYCUSTOMTAG"); - appendPropertyByTag(out, "dfs.cblock.trace.io", "false", "YARN"); - appendPropertyByTag(out, "dfs.replication", "1", "HDFS"); - appendPropertyByTag(out, "dfs.namenode.logging.level", "INFO", - "NAMENODE"); - appendPropertyByTag(out, "dfs.random.key", "XYZ", "MYCUSTOMTAG"); - endConfig(out); - - Path fileResource = new Path(coreDefault.getAbsolutePath()); - conf.addResource(fileResource); - Assert.assertEquals(conf.getAllPropertiesByTag("MYCUSTOMTAG") - .getProperty("dfs.random.key"), "XYZ"); - } - - try (BufferedWriter out = new BufferedWriter(new FileWriter(coreSite))) { - startConfig(out); - appendProperty(out, "dfs.random.key", "ABC"); - appendProperty(out, "dfs.replication", "3"); - appendProperty(out, "dfs.cblock.trace.io", "true"); - endConfig(out); - - Path fileResource = new Path(coreSite.getAbsolutePath()); - conf.addResource(fileResource); - } - - // Test if values are getting overridden even without tags being present - Assert.assertEquals("3", conf.getAllPropertiesByTag("HDFS") - .getProperty("dfs.replication")); - Assert.assertEquals("ABC", conf.getAllPropertiesByTag("MYCUSTOMTAG") - .getProperty("dfs.random.key")); - Assert.assertEquals("true", conf.getAllPropertiesByTag("YARN") - .getProperty("dfs.cblock.trace.io")); - } - - @Test - public void getConfigurationObject() { - OzoneConfiguration ozoneConfig = new OzoneConfiguration(); - ozoneConfig.set("ozone.scm.client.address", "address"); - ozoneConfig.set("ozone.scm.client.bind.host", "host"); - ozoneConfig.setBoolean("ozone.scm.client.enabled", true); - ozoneConfig.setInt("ozone.scm.client.port", 5555); - ozoneConfig.setTimeDuration("ozone.scm.client.wait", 10, TimeUnit.MINUTES); - - SimpleConfiguration configuration = - ozoneConfig.getObject(SimpleConfiguration.class); - - Assert.assertEquals("host", configuration.getBindHost()); - Assert.assertEquals("address", 
configuration.getClientAddress()); - Assert.assertEquals(true, configuration.isEnabled()); - Assert.assertEquals(5555, configuration.getPort()); - Assert.assertEquals(600, configuration.getWaitTime()); - } - - @Test - public void getConfigurationObjectWithDefault() { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - - SimpleConfiguration configuration = - ozoneConfiguration.getObject(SimpleConfiguration.class); - - Assert.assertEquals(true, configuration.isEnabled()); - Assert.assertEquals(9878, configuration.getPort()); - } - - - private void appendProperty(BufferedWriter out, String name, String val) - throws IOException { - this.appendProperty(out, name, val, false); - } - - private void appendProperty(BufferedWriter out, String name, String val, - boolean isFinal) throws IOException { - out.write(""); - out.write(""); - out.write(name); - out.write(""); - out.write(""); - out.write(val); - out.write(""); - if (isFinal) { - out.write("true"); - } - out.write("\n"); - } - - private void appendPropertyByTag(BufferedWriter out, String name, String val, - String tags) throws IOException { - this.appendPropertyByTag(out, name, val, false, tags); - } - - private void appendPropertyByTag(BufferedWriter out, String name, String val, - boolean isFinal, - String tag) throws IOException { - out.write(""); - out.write(""); - out.write(name); - out.write(""); - out.write(""); - out.write(val); - out.write(""); - if (isFinal) { - out.write("true"); - } - out.write(""); - out.write(tag); - out.write(""); - out.write("\n"); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java deleted file mode 100644 index e72c902045b47..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains the OzoneConfiguration related tests. - */ -package org.apache.hadoop.hdds.conf; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java deleted file mode 100644 index bbe6ab7cca7a3..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.ratis; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.Assert; -import org.junit.Test; - -import java.util.Random; -import java.util.UUID; -import java.util.function.BiFunction; - -/** Testing {@link ContainerCommandRequestMessage}. 
*/ -public class TestContainerCommandRequestMessage { - static final Random RANDOM = new Random(); - - static ByteString newData(int length, Random random) { - final ByteString.Output out = ByteString.newOutput(); - for(int i = 0; i < length; i++) { - out.write(random.nextInt()); - } - return out.toByteString(); - } - - static ChecksumData checksum(ByteString data) { - try { - return new Checksum().computeChecksum(data.toByteArray()); - } catch (OzoneChecksumException e) { - throw new IllegalStateException(e); - } - } - - static ContainerCommandRequestProto newPutSmallFile( - BlockID blockID, ByteString data) { - final BlockData.Builder blockData - = BlockData.newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()); - final PutBlockRequestProto.Builder putBlockRequest - = PutBlockRequestProto.newBuilder() - .setBlockData(blockData); - final KeyValue keyValue = KeyValue.newBuilder() - .setKey("OverWriteRequested") - .setValue("true") - .build(); - final ChunkInfo chunk = ChunkInfo.newBuilder() - .setChunkName(blockID.getLocalID() + "_chunk") - .setOffset(0) - .setLen(data.size()) - .addMetadata(keyValue) - .setChecksumData(checksum(data).getProtoBufMessage()) - .build(); - final PutSmallFileRequestProto putSmallFileRequest - = PutSmallFileRequestProto.newBuilder() - .setChunkInfo(chunk) - .setBlock(putBlockRequest) - .setData(data) - .build(); - return ContainerCommandRequestProto.newBuilder() - .setCmdType(Type.PutSmallFile) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(UUID.randomUUID().toString()) - .setPutSmallFile(putSmallFileRequest) - .build(); - } - - static ContainerCommandRequestProto newWriteChunk( - BlockID blockID, ByteString data) { - final ChunkInfo chunk = ChunkInfo.newBuilder() - .setChunkName(blockID.getLocalID() + "_chunk_" + 1) - .setOffset(0) - .setLen(data.size()) - .setChecksumData(checksum(data).getProtoBufMessage()) - .build(); - - final WriteChunkRequestProto.Builder writeChunkRequest - = WriteChunkRequestProto.newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .setChunkData(chunk) - .setData(data); - return ContainerCommandRequestProto.newBuilder() - .setCmdType(Type.WriteChunk) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(UUID.randomUUID().toString()) - .setWriteChunk(writeChunkRequest) - .build(); - } - - @Test - public void testPutSmallFile() throws Exception { - runTest(TestContainerCommandRequestMessage::newPutSmallFile); - } - - @Test - public void testWriteChunk() throws Exception { - runTest(TestContainerCommandRequestMessage::newWriteChunk); - } - - static void runTest( - BiFunction method) - throws Exception { - for(int i = 0; i < 2; i++) { - runTest(i, method); - } - for(int i = 2; i < 1 << 10;) { - runTest(i + 1 + RANDOM.nextInt(i - 1), method); - i <<= 1; - runTest(i, method); - } - } - - static void runTest(int length, - BiFunction method) - throws Exception { - System.out.println("length=" + length); - final BlockID blockID = new BlockID(RANDOM.nextLong(), RANDOM.nextLong()); - final ByteString data = newData(length, RANDOM); - - final ContainerCommandRequestProto original = method.apply(blockID, data); - final ContainerCommandRequestMessage message - = ContainerCommandRequestMessage.toMessage(original, null); - final ContainerCommandRequestProto computed - = ContainerCommandRequestMessage.toProto(message.getContent(), null); - Assert.assertEquals(original, computed); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/exceptions/TestSCMExceptionResultCodes.java 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/exceptions/TestSCMExceptionResultCodes.java deleted file mode 100644 index b5b4684dda0ca..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/exceptions/TestSCMExceptionResultCodes.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.exceptions; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; -import org.apache.hadoop.hdds.protocol.proto. - ScmBlockLocationProtocolProtos.Status; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test Result code mappping between SCMException and the protobuf definitions. - */ -public class TestSCMExceptionResultCodes { - - @Test - public void codeMapping() { - // ResultCode = SCMException definition - // Status = protobuf definition - Assert.assertEquals(ResultCodes.values().length, Status.values().length); - for (int i = 0; i < ResultCodes.values().length; i++) { - ResultCodes codeValue = ResultCodes.values()[i]; - Status protoBufValue = Status.values()[i]; - Assert.assertTrue(String - .format("Protobuf/Enum constant name mismatch %s %s", codeValue, - protoBufValue), sameName(codeValue.name(), protoBufValue.name())); - ResultCodes converted = ResultCodes.values()[protoBufValue.ordinal()]; - Assert.assertEquals(codeValue, converted); - } - } - - private boolean sameName(String codeValue, String protoBufValue) { - return codeValue.equals(protoBufValue); - } - -} - diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java deleted file mode 100644 index b31e4a8e9965c..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java +++ /dev/null @@ -1,953 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; -import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.REGION_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.DATACENTER_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.NODEGROUP_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; - -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.stream.Collectors; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeTrue; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.junit.runner.RunWith; - -/** Test the network topology functions. */ -@RunWith(Parameterized.class) -public class TestNetworkTopologyImpl { - private static final Logger LOG = LoggerFactory.getLogger( - TestNetworkTopologyImpl.class); - private NetworkTopology cluster; - private Node[] dataNodes; - private Random random = new Random(); - - public TestNetworkTopologyImpl(NodeSchema[] schemas, Node[] nodeArray) { - NodeSchemaManager.getInstance().init(schemas, true); - cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance()); - dataNodes = nodeArray; - for (int i = 0; i < dataNodes.length; i++) { - cluster.add(dataNodes[i]); - } - } - - @Rule - public Timeout testTimeout = new Timeout(3000000); - - @Parameters - public static Collection setupDatanodes() { - Object[][] topologies = new Object[][]{ - {new NodeSchema[] {ROOT_SCHEMA, LEAF_SCHEMA}, - new Node[]{ - createDatanode("1.1.1.1", "/"), - createDatanode("2.2.2.2", "/"), - createDatanode("3.3.3.3", "/"), - createDatanode("4.4.4.4", "/"), - createDatanode("5.5.5.5", "/"), - createDatanode("6.6.6.6", "/"), - createDatanode("7.7.7.7", "/"), - createDatanode("8.8.8.8", "/"), - }}, - {new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}, - new Node[]{ - createDatanode("1.1.1.1", "/r1"), - createDatanode("2.2.2.2", "/r1"), - createDatanode("3.3.3.3", "/r2"), - createDatanode("4.4.4.4", "/r2"), - createDatanode("5.5.5.5", "/r2"), - createDatanode("6.6.6.6", "/r3"), - createDatanode("7.7.7.7", "/r3"), - createDatanode("8.8.8.8", "/r3"), - }}, - {new NodeSchema[] - {ROOT_SCHEMA, DATACENTER_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}, - new Node[]{ - createDatanode("1.1.1.1", "/d1/r1"), - createDatanode("2.2.2.2", "/d1/r1"), - createDatanode("3.3.3.3", "/d1/r2"), - createDatanode("4.4.4.4", "/d1/r2"), - createDatanode("5.5.5.5", "/d1/r2"), - createDatanode("6.6.6.6", "/d2/r3"), - createDatanode("7.7.7.7", "/d2/r3"), - createDatanode("8.8.8.8", "/d2/r3"), - }}, - {new NodeSchema[] {ROOT_SCHEMA, DATACENTER_SCHEMA, RACK_SCHEMA, - NODEGROUP_SCHEMA, LEAF_SCHEMA}, - 
new Node[]{ - createDatanode("1.1.1.1", "/d1/r1/ng1"), - createDatanode("2.2.2.2", "/d1/r1/ng1"), - createDatanode("3.3.3.3", "/d1/r2/ng2"), - createDatanode("4.4.4.4", "/d1/r2/ng2"), - createDatanode("5.5.5.5", "/d1/r2/ng3"), - createDatanode("6.6.6.6", "/d2/r3/ng3"), - createDatanode("7.7.7.7", "/d2/r3/ng3"), - createDatanode("8.8.8.8", "/d2/r3/ng3"), - createDatanode("9.9.9.9", "/d3/r1/ng1"), - createDatanode("10.10.10.10", "/d3/r1/ng1"), - createDatanode("11.11.11.11", "/d3/r1/ng1"), - createDatanode("12.12.12.12", "/d3/r2/ng2"), - createDatanode("13.13.13.13", "/d3/r2/ng2"), - createDatanode("14.14.14.14", "/d4/r1/ng1"), - createDatanode("15.15.15.15", "/d4/r1/ng1"), - createDatanode("16.16.16.16", "/d4/r1/ng1"), - createDatanode("17.17.17.17", "/d4/r1/ng2"), - createDatanode("18.18.18.18", "/d4/r1/ng2"), - createDatanode("19.19.19.19", "/d4/r1/ng3"), - createDatanode("20.20.20.20", "/d4/r1/ng3"), - }}, - {new NodeSchema[] {ROOT_SCHEMA, REGION_SCHEMA, DATACENTER_SCHEMA, - RACK_SCHEMA, NODEGROUP_SCHEMA, LEAF_SCHEMA}, - new Node[]{ - createDatanode("1.1.1.1", "/d1/rg1/r1/ng1"), - createDatanode("2.2.2.2", "/d1/rg1/r1/ng1"), - createDatanode("3.3.3.3", "/d1/rg1/r1/ng2"), - createDatanode("4.4.4.4", "/d1/rg1/r1/ng1"), - createDatanode("5.5.5.5", "/d1/rg1/r1/ng1"), - createDatanode("6.6.6.6", "/d1/rg1/r1/ng2"), - createDatanode("7.7.7.7", "/d1/rg1/r1/ng2"), - createDatanode("8.8.8.8", "/d1/rg1/r1/ng2"), - createDatanode("9.9.9.9", "/d1/rg1/r1/ng2"), - createDatanode("10.10.10.10", "/d1/rg1/r1/ng2"), - createDatanode("11.11.11.11", "/d1/rg1/r2/ng1"), - createDatanode("12.12.12.12", "/d1/rg1/r2/ng1"), - createDatanode("13.13.13.13", "/d1/rg1/r2/ng1"), - createDatanode("14.14.14.14", "/d1/rg1/r2/ng1"), - createDatanode("15.15.15.15", "/d1/rg1/r2/ng1"), - createDatanode("16.16.16.16", "/d1/rg1/r2/ng2"), - createDatanode("17.17.17.17", "/d1/rg1/r2/ng2"), - createDatanode("18.18.18.18", "/d1/rg1/r2/ng2"), - createDatanode("19.19.19.19", "/d1/rg1/r2/ng2"), - createDatanode("20.20.20.20", "/d1/rg1/r2/ng2"), - createDatanode("21.21.21.21", "/d2/rg1/r2/ng1"), - createDatanode("22.22.22.22", "/d2/rg1/r2/ng1"), - createDatanode("23.23.23.23", "/d2/rg2/r2/ng1"), - createDatanode("24.24.24.24", "/d2/rg2/r2/ng1"), - createDatanode("25.25.25.25", "/d2/rg2/r2/ng1"), - }} - }; - return Arrays.asList(topologies); - } - - @Test - public void testContains() { - Node nodeNotInMap = createDatanode("8.8.8.8", "/d2/r4"); - for (int i=0; i < dataNodes.length; i++) { - assertTrue(cluster.contains(dataNodes[i])); - } - assertFalse(cluster.contains(nodeNotInMap)); - } - - @Test - public void testNumOfChildren() { - assertEquals(dataNodes.length, cluster.getNumOfLeafNode(null)); - assertEquals(0, cluster.getNumOfLeafNode("/switch1/node1")); - } - - @Test - public void testGetNode() { - assertEquals(cluster.getNode(""), cluster.getNode(null)); - assertEquals(cluster.getNode(""), cluster.getNode("/")); - assertEquals(null, cluster.getNode("/switch1/node1")); - assertEquals(null, cluster.getNode("/switch1")); - } - - @Test - public void testCreateInvalidTopology() { - List schemas = new ArrayList(); - schemas.add(ROOT_SCHEMA); - schemas.add(RACK_SCHEMA); - schemas.add(LEAF_SCHEMA); - NodeSchemaManager.getInstance().init(schemas.toArray(new NodeSchema[0]), - true); - NetworkTopology newCluster = new NetworkTopologyImpl( - NodeSchemaManager.getInstance()); - Node[] invalidDataNodes = new Node[] { - createDatanode("1.1.1.1", "/r1"), - createDatanode("2.2.2.2", "/r2"), - createDatanode("3.3.3.3", "/d1/r2") - }; - 
newCluster.add(invalidDataNodes[0]); - newCluster.add(invalidDataNodes[1]); - try { - newCluster.add(invalidDataNodes[2]); - fail("expected InvalidTopologyException"); - } catch (NetworkTopology.InvalidTopologyException e) { - assertTrue(e.getMessage().contains("Failed to add")); - assertTrue(e.getMessage().contains("Its path depth is not " + - newCluster.getMaxLevel())); - } - } - - @Test - public void testInitWithConfigFile() { - ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); - Configuration conf = new Configuration(); - try { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath(); - conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath); - NetworkTopology newCluster = new NetworkTopologyImpl(conf); - LOG.info("network topology max level = " + newCluster.getMaxLevel()); - } catch (Throwable e) { - fail("should succeed"); - } - } - - @Test - public void testAncestor() { - assumeTrue(cluster.getMaxLevel() > 2); - int maxLevel = cluster.getMaxLevel(); - assertTrue(cluster.isSameParent(dataNodes[0], dataNodes[1])); - while(maxLevel > 1) { - assertTrue(cluster.isSameAncestor(dataNodes[0], dataNodes[1], - maxLevel - 1)); - maxLevel--; - } - assertFalse(cluster.isSameParent(dataNodes[1], dataNodes[2])); - assertFalse(cluster.isSameParent(null, dataNodes[2])); - assertFalse(cluster.isSameParent(dataNodes[1], null)); - assertFalse(cluster.isSameParent(null, null)); - - assertFalse(cluster.isSameAncestor(dataNodes[1], dataNodes[2], 0)); - assertFalse(cluster.isSameAncestor(dataNodes[1], null, 1)); - assertFalse(cluster.isSameAncestor(null, dataNodes[2], 1)); - assertFalse(cluster.isSameAncestor(null, null, 1)); - - maxLevel = cluster.getMaxLevel(); - assertTrue(cluster.isSameAncestor( - dataNodes[random.nextInt(cluster.getNumOfLeafNode(null))], - dataNodes[random.nextInt(cluster.getNumOfLeafNode(null))], - maxLevel - 1)); - } - - @Test - public void testAddRemove() { - for(int i = 0; i < dataNodes.length; i++) { - cluster.remove(dataNodes[i]); - } - for(int i = 0; i < dataNodes.length; i++) { - assertFalse(cluster.contains(dataNodes[i])); - } - // no leaf nodes - assertEquals(0, cluster.getNumOfLeafNode(null)); - // no inner nodes - assertEquals(0, cluster.getNumOfNodes(2)); - for(int i = 0; i < dataNodes.length; i++) { - cluster.add(dataNodes[i]); - } - // Inner nodes are created automatically - assertTrue(cluster.getNumOfNodes(2) > 0); - - try { - cluster.add(cluster.chooseRandom(null).getParent()); - fail("Inner node can not be added manually"); - } catch (Exception e) { - assertTrue(e.getMessage().startsWith( - "Not allowed to add an inner node")); - } - - try { - cluster.remove(cluster.chooseRandom(null).getParent()); - fail("Inner node can not be removed manually"); - } catch (Exception e) { - assertTrue(e.getMessage().startsWith( - "Not allowed to remove an inner node")); - } - } - - @Test - public void testGetNodesWithLevel() { - int maxLevel = cluster.getMaxLevel(); - try { - assertEquals(1, cluster.getNumOfNodes(0)); - fail("level 0 is not supported"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().startsWith("Invalid level")); - } - - try { - assertEquals(1, cluster.getNumOfNodes(0)); - fail("level 0 is not supported"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().startsWith("Invalid level")); - } - - try { - assertEquals(1, cluster.getNumOfNodes(maxLevel + 1)); - fail("level out of scope"); - } catch (IllegalArgumentException e) { - 
assertTrue(e.getMessage().startsWith("Invalid level")); - } - - try { - assertEquals(1, cluster.getNumOfNodes(maxLevel + 1)); - fail("level out of scope"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().startsWith("Invalid level")); - } - // root node - assertEquals(1, cluster.getNumOfNodes(1)); - assertEquals(1, cluster.getNumOfNodes(1)); - // leaf nodes - assertEquals(dataNodes.length, cluster.getNumOfNodes(maxLevel)); - assertEquals(dataNodes.length, cluster.getNumOfNodes(maxLevel)); - } - - @Test - public void testChooseRandomSimple() { - String path = - dataNodes[random.nextInt(dataNodes.length)].getNetworkFullPath(); - assertEquals(path, cluster.chooseRandom(path).getNetworkFullPath()); - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); - // test chooseRandom(String scope) - while (!path.equals(ROOT)) { - assertTrue(cluster.chooseRandom(path).getNetworkLocation() - .startsWith(path)); - Node node = cluster.chooseRandom("~" + path); - assertTrue(!node.getNetworkLocation() - .startsWith(path)); - path = path.substring(0, - path.lastIndexOf(PATH_SEPARATOR_STR)); - } - assertNotNull(cluster.chooseRandom(null)); - assertNotNull(cluster.chooseRandom("")); - assertNotNull(cluster.chooseRandom("/")); - assertNull(cluster.chooseRandom("~")); - assertNull(cluster.chooseRandom("~/")); - - // test chooseRandom(String scope, String excludedScope) - path = dataNodes[random.nextInt(dataNodes.length)].getNetworkFullPath(); - List pathList = new ArrayList<>(); - pathList.add(path); - assertNull(cluster.chooseRandom(path, pathList)); - assertNotNull(cluster.chooseRandom(null, pathList)); - assertNotNull(cluster.chooseRandom("", pathList)); - - // test chooseRandom(String scope, Collection excludedNodes) - assertNull(cluster.chooseRandom("", Arrays.asList(dataNodes))); - assertNull(cluster.chooseRandom("/", Arrays.asList(dataNodes))); - assertNull(cluster.chooseRandom("~", Arrays.asList(dataNodes))); - assertNull(cluster.chooseRandom("~/", Arrays.asList(dataNodes))); - assertNull(cluster.chooseRandom(null, Arrays.asList(dataNodes))); - } - - /** - * Following test checks that chooseRandom works for an excluded scope. 
- */ - @Test - public void testChooseRandomExcludedScope() { - int[] excludedNodeIndexs = {0, dataNodes.length - 1, - random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)}; - String scope; - Map frequency; - for (int i : excludedNodeIndexs) { - String path = dataNodes[i].getNetworkFullPath(); - while (!path.equals(ROOT)) { - scope = "~" + path; - frequency = pickNodesAtRandom(100, scope, null, 0); - for (Node key : dataNodes) { - if (key.getNetworkFullPath().startsWith(path)) { - assertTrue(frequency.get(key) == 0); - } - } - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); - } - } - - // null excludedScope, every node should be chosen - frequency = pickNodes(100, null, null, null, 0); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) != 0); - } - - // "" excludedScope, no node will ever be chosen - List pathList = new ArrayList(); - pathList.add(""); - frequency = pickNodes(100, pathList, null, null, 0); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) == 0); - } - - // "~" scope, no node will ever be chosen - scope = "~"; - frequency = pickNodesAtRandom(100, scope, null, 0); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) == 0); - } - // out network topology excluded scope, every node should be chosen - pathList.clear(); - pathList.add("/city1"); - frequency = pickNodes( - cluster.getNumOfLeafNode(null), pathList, null, null, 0); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) != 0); - } - } - - /** - * Following test checks that chooseRandom works for an excluded nodes. - */ - @Test - public void testChooseRandomExcludedNode() { - Node[][] excludedNodeLists = { - {}, - {dataNodes[0]}, - {dataNodes[dataNodes.length - 1]}, - {dataNodes[random.nextInt(dataNodes.length)]}, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }}; - int leafNum = cluster.getNumOfLeafNode(null); - Map frequency; - for(Node[] list : excludedNodeLists) { - List excludedList = Arrays.asList(list); - int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { - frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen); - List ancestorList = NetUtils.getAncestorList(cluster, - excludedList, ancestorGen); - for (Node key : dataNodes) { - if (excludedList.contains(key) || - (ancestorList.size() > 0 && - ancestorList.stream() - .map(a -> (InnerNode) a) - .filter(a -> a.isAncestor(key)) - .collect(Collectors.toList()).size() > 0)) { - assertTrue(frequency.get(key) == 0); - } - } - ancestorGen++; - } - } - // all nodes excluded, no node will be picked - List excludedList = Arrays.asList(dataNodes); - int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { - frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) == 0); - } - ancestorGen++; - } - // out scope excluded nodes, each node will be picked - excludedList = Arrays.asList(createDatanode("1.1.1.1.", "/city1/rack1")); - ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { - frequency = pickNodes(leafNum, null, excludedList, null, ancestorGen); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) != 0); - } - ancestorGen++; - } - } - - /** - * Following test checks that chooseRandom works for excluded nodes and scope. 
- */ - @Test - public void testChooseRandomExcludedNodeAndScope() { - int[] excludedNodeIndexs = {0, dataNodes.length - 1, - random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)}; - Node[][] excludedNodeLists = { - {}, - {dataNodes[0]}, - {dataNodes[dataNodes.length - 1]}, - {dataNodes[random.nextInt(dataNodes.length)]}, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }}; - int leafNum = cluster.getNumOfLeafNode(null); - Map frequency; - String scope; - for (int i : excludedNodeIndexs) { - String path = dataNodes[i].getNetworkFullPath(); - while (!path.equals(ROOT)) { - scope = "~" + path; - int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { - for (Node[] list : excludedNodeLists) { - List excludedList = Arrays.asList(list); - frequency = - pickNodesAtRandom(leafNum, scope, excludedList, ancestorGen); - List ancestorList = NetUtils.getAncestorList(cluster, - excludedList, ancestorGen); - for (Node key : dataNodes) { - if (excludedList.contains(key) || - key.getNetworkFullPath().startsWith(path) || - (ancestorList.size() > 0 && - ancestorList.stream() - .map(a -> (InnerNode) a) - .filter(a -> a.isAncestor(key)) - .collect(Collectors.toList()).size() > 0)) { - assertTrue(frequency.get(key) == 0); - } - } - } - ancestorGen++; - } - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); - } - } - // all nodes excluded, no node will be picked - List excludedList = Arrays.asList(dataNodes); - for (int i : excludedNodeIndexs) { - String path = dataNodes[i].getNetworkFullPath(); - while (!path.equals(ROOT)) { - scope = "~" + path; - int ancestorGen = 0; - while (ancestorGen < cluster.getMaxLevel()) { - frequency = - pickNodesAtRandom(leafNum, scope, excludedList, ancestorGen); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) == 0); - } - ancestorGen++; - } - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); - } - } - - // no node excluded and no excluded scope, each node will be picked - int ancestorGen = 0; - while (ancestorGen < cluster.getMaxLevel()) { - frequency = pickNodes(leafNum, null, null, null, ancestorGen); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) != 0); - } - ancestorGen++; - } - } - - /** - * Following test checks that chooseRandom works for excluded nodes, scope - * and ancestor generation. 
- */ - @Test - public void testChooseRandomWithAffinityNode() { - int[] excludedNodeIndexs = {0, dataNodes.length - 1, - random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)}; - Node[][] excludedNodeLists = { - {}, - {dataNodes[0]}, - {dataNodes[dataNodes.length - 1]}, - {dataNodes[random.nextInt(dataNodes.length)]}, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }}; - int[] affinityNodeIndexs = {0, dataNodes.length - 1, - random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)}; - Node[][] excludedScopeIndexs = {{dataNodes[0]}, - {dataNodes[dataNodes.length - 1]}, - {dataNodes[random.nextInt(dataNodes.length)]}, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }}; - int leafNum = cluster.getNumOfLeafNode(null); - Map frequency; - List pathList = new ArrayList<>(); - for (int k : affinityNodeIndexs) { - for (Node[] excludedScopes : excludedScopeIndexs) { - pathList.clear(); - pathList.addAll(Arrays.stream(excludedScopes) - .map(node -> node.getNetworkFullPath()) - .collect(Collectors.toList())); - while (!pathList.get(0).equals(ROOT)) { - int ancestorGen = cluster.getMaxLevel() - 1; - while (ancestorGen > 0) { - for (Node[] list : excludedNodeLists) { - List excludedList = Arrays.asList(list); - frequency = pickNodes(leafNum, pathList, excludedList, - dataNodes[k], ancestorGen); - Node affinityAncestor = dataNodes[k].getAncestor(ancestorGen); - for (Node key : dataNodes) { - if (affinityAncestor != null) { - if (frequency.get(key) > 0) { - assertTrue(affinityAncestor.isAncestor(key)); - } else if (!affinityAncestor.isAncestor(key)) { - continue; - } else if (excludedList != null && - excludedList.contains(key)) { - continue; - } else if (pathList != null && - pathList.stream().anyMatch(path -> - key.getNetworkFullPath().startsWith(path))) { - continue; - } else { - fail("Node is not picked when sequentially going " + - "through ancestor node's leaf nodes. node:" + - key.getNetworkFullPath() + ", ancestor node:" + - affinityAncestor.getNetworkFullPath() + - ", excludedScope: " + pathList.toString() + ", " + - "excludedList:" + (excludedList == null ? 
"" : - excludedList.toString())); - } - } - } - } - ancestorGen--; - } - pathList = pathList.stream().map(path -> - path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR))) - .collect(Collectors.toList()); - } - } - } - - // all nodes excluded, no node will be picked - String scope; - List excludedList = Arrays.asList(dataNodes); - for (int k : affinityNodeIndexs) { - for (int i : excludedNodeIndexs) { - String path = dataNodes[i].getNetworkFullPath(); - while (!path.equals(ROOT)) { - scope = "~" + path; - int ancestorGen = 0; - while (ancestorGen < cluster.getMaxLevel()) { - frequency = pickNodesAtRandom(leafNum, scope, excludedList, - dataNodes[k], ancestorGen); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) == 0); - } - ancestorGen++; - } - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); - } - } - } - // no node excluded and no excluded scope, each node will be picked - int ancestorGen = cluster.getMaxLevel() - 1; - for (int k : affinityNodeIndexs) { - while (ancestorGen > 0) { - frequency = - pickNodes(leafNum, null, null, dataNodes[k], ancestorGen); - Node affinityAncestor = dataNodes[k].getAncestor(ancestorGen); - for (Node key : dataNodes) { - if (frequency.get(key) > 0) { - if (affinityAncestor != null) { - assertTrue(affinityAncestor.isAncestor(key)); - } - } - } - ancestorGen--; - } - } - // check invalid ancestor generation - try { - cluster.chooseRandom(null, null, null, dataNodes[0], - cluster.getMaxLevel()); - fail("ancestor generation exceeds max level, should fail"); - } catch (Exception e) { - assertTrue(e.getMessage().startsWith("ancestorGen " + - cluster.getMaxLevel() + - " exceeds this network topology acceptable level")); - } - } - - @Test - public void testCost() { - // network topology with default cost - List schemas = new ArrayList<>(); - schemas.add(ROOT_SCHEMA); - schemas.add(RACK_SCHEMA); - schemas.add(NODEGROUP_SCHEMA); - schemas.add(LEAF_SCHEMA); - - NodeSchemaManager manager = NodeSchemaManager.getInstance(); - manager.init(schemas.toArray(new NodeSchema[0]), true); - NetworkTopology newCluster = - new NetworkTopologyImpl(manager); - Node[] nodeList = new Node[] { - createDatanode("1.1.1.1", "/r1/ng1"), - createDatanode("2.2.2.2", "/r1/ng1"), - createDatanode("3.3.3.3", "/r1/ng2"), - createDatanode("4.4.4.4", "/r2/ng1"), - }; - for (Node node: nodeList) { - newCluster.add(node); - } - Node outScopeNode1 = createDatanode("5.5.5.5", "/r2/ng2"); - Node outScopeNode2 = createDatanode("6.6.6.6", "/r2/ng2"); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(nodeList[0], null)); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(null, nodeList[0])); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(outScopeNode1, nodeList[0])); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(nodeList[0], outScopeNode1)); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(outScopeNode1, outScopeNode2)); - - assertEquals(0, newCluster.getDistanceCost(null, null)); - assertEquals(0, newCluster.getDistanceCost(nodeList[0], nodeList[0])); - assertEquals(2, newCluster.getDistanceCost(nodeList[0], nodeList[1])); - assertEquals(4, newCluster.getDistanceCost(nodeList[0], nodeList[2])); - assertEquals(6, newCluster.getDistanceCost(nodeList[0], nodeList[3])); - - // network topology with customized cost - schemas.clear(); - schemas.add(new NodeSchema.Builder() - .setType(NodeSchema.LayerType.ROOT).setCost(5).build()); - schemas.add(new NodeSchema.Builder() - 
.setType(NodeSchema.LayerType.INNER_NODE).setCost(3).build()); - schemas.add(new NodeSchema.Builder() - .setType(NodeSchema.LayerType.INNER_NODE).setCost(1).build()); - schemas.add(new NodeSchema.Builder() - .setType(NodeSchema.LayerType.LEAF_NODE).build()); - manager = NodeSchemaManager.getInstance(); - manager.init(schemas.toArray(new NodeSchema[0]), true); - newCluster = new NetworkTopologyImpl(manager); - for (Node node: nodeList) { - newCluster.add(node); - } - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(nodeList[0], null)); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(null, nodeList[0])); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(outScopeNode1, nodeList[0])); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(nodeList[0], outScopeNode1)); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(outScopeNode1, outScopeNode2)); - - assertEquals(0, newCluster.getDistanceCost(null, null)); - assertEquals(0, newCluster.getDistanceCost(nodeList[0], nodeList[0])); - assertEquals(2, newCluster.getDistanceCost(nodeList[0], nodeList[1])); - assertEquals(8, newCluster.getDistanceCost(nodeList[0], nodeList[2])); - assertEquals(18, newCluster.getDistanceCost(nodeList[0], nodeList[3])); - } - - @Test - public void testSortByDistanceCost() { - Node[][] nodes = { - {}, - {dataNodes[0]}, - {dataNodes[dataNodes.length - 1]}, - {dataNodes[random.nextInt(dataNodes.length)]}, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }}; - Node[] readers = {null, dataNodes[0], dataNodes[dataNodes.length - 1], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }; - for (Node reader : readers) { - for (Node[] nodeList : nodes) { - int length = nodeList.length; - while (length > 0) { - List ret = cluster.sortByDistanceCost(reader, - Arrays.asList(nodeList), length); - for (int i = 0; i < ret.size(); i++) { - if ((i + 1) < ret.size()) { - int cost1 = cluster.getDistanceCost(reader, ret.get(i)); - int cost2 = cluster.getDistanceCost(reader, ret.get(i + 1)); - assertTrue("reader:" + (reader != null ? 
- reader.getNetworkFullPath() : "null") + - ",node1:" + ret.get(i).getNetworkFullPath() + - ",node2:" + ret.get(i + 1).getNetworkFullPath() + - ",cost1:" + cost1 + ",cost2:" + cost2, - cost1 == Integer.MAX_VALUE || cost1 <= cost2); - } - } - length--; - } - } - } - - // sort all nodes - List nodeList = Arrays.asList(dataNodes.clone()); - for (Node reader : readers) { - int length = nodeList.size(); - while (length >= 0) { - List sortedNodeList = - cluster.sortByDistanceCost(reader, nodeList, length); - for (int i = 0; i < sortedNodeList.size(); i++) { - if ((i + 1) < sortedNodeList.size()) { - int cost1 = cluster.getDistanceCost(reader, sortedNodeList.get(i)); - int cost2 = cluster.getDistanceCost( - reader, sortedNodeList.get(i + 1)); - // node can be removed when called in testConcurrentAccess - assertTrue("reader:" + (reader != null ? - reader.getNetworkFullPath() : "null") + - ",node1:" + sortedNodeList.get(i).getNetworkFullPath() + - ",node2:" + sortedNodeList.get(i + 1).getNetworkFullPath() + - ",cost1:" + cost1 + ",cost2:" + cost2, - cost1 == Integer.MAX_VALUE || cost1 <= cost2); - } - } - length--; - } - } - } - - private static Node createDatanode(String name, String path) { - return new NodeImpl(name, path, NetConstants.NODE_COST_DEFAULT); - } - - /** - * This picks a large number of nodes at random in order to ensure coverage. - * - * @param numNodes the number of nodes - * @param excludedScope the excluded scope - * @param excludedNodes the excluded node list - * @param ancestorGen the chosen node cannot share the same ancestor at - * this generation with excludedNodes - * @return the frequency that nodes were chosen - */ - private Map pickNodesAtRandom(int numNodes, - String excludedScope, Collection excludedNodes, int ancestorGen) { - Map frequency = new HashMap(); - for (Node dnd : dataNodes) { - frequency.put(dnd, 0); - } - for (int j = 0; j < numNodes; j++) { - Node node = cluster.chooseRandom(excludedScope, excludedNodes, - ancestorGen); - if (node != null) { - frequency.put(node, frequency.get(node) + 1); - } - } - LOG.info("Result:" + frequency); - return frequency; - } - - /** - * This picks a large number of nodes at random in order to ensure coverage. - * - * @param numNodes the number of nodes - * @param excludedScope the excluded scope - * @param excludedNodes the excluded node list - * @param affinityNode the chosen node should share the same ancestor at - * generation "ancestorGen" with this node - * @param ancestorGen the chosen node cannot share the same ancestor at - * this generation with excludedNodes - * @return the frequency that nodes were chosen - */ - private Map pickNodesAtRandom(int numNodes, - String excludedScope, Collection excludedNodes, Node affinityNode, - int ancestorGen) { - Map frequency = new HashMap(); - for (Node dnd : dataNodes) { - frequency.put(dnd, 0); - } - - List pathList = new ArrayList<>(); - pathList.add(excludedScope.substring(1)); - for (int j = 0; j < numNodes; j++) { - - Node node = cluster.chooseRandom("", pathList, excludedNodes, - affinityNode, ancestorGen); - if (node != null) { - frequency.put(node, frequency.get(node) + 1); - } - } - LOG.info("Result:" + frequency); - return frequency; - } - - /** - * This picks a large amount of nodes sequentially. 
- * - * @param numNodes the number of nodes - * @param excludedScopes the excluded scopes, should not start with "~" - * @param excludedNodes the excluded node list - * @param affinityNode the chosen node should share the same ancestor at - * generation "ancestorGen" with this node - * @param ancestorGen the chosen node cannot share the same ancestor at - * this generation with excludedNodes - * @return the frequency that nodes were chosen - */ - private Map pickNodes(int numNodes, - List excludedScopes, Collection excludedNodes, - Node affinityNode, int ancestorGen) { - Map frequency = new HashMap<>(); - for (Node dnd : dataNodes) { - frequency.put(dnd, 0); - } - excludedNodes = excludedNodes == null ? null : - excludedNodes.stream().distinct().collect(Collectors.toList()); - for (int j = 0; j < numNodes; j++) { - Node node = cluster.getNode(j, null, excludedScopes, excludedNodes, - affinityNode, ancestorGen); - if (node != null) { - frequency.put(node, frequency.get(node) + 1); - } - } - - LOG.info("Result:" + frequency); - return frequency; - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java deleted file mode 100644 index 0c20353a2ce0c..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Arrays; -import java.util.Collection; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** Test the node schema loader. 
*/ -@RunWith(Parameterized.class) -public class TestNodeSchemaLoader { - private static final Logger LOG = - LoggerFactory.getLogger(TestNodeSchemaLoader.class); - private ClassLoader classLoader = - Thread.currentThread().getContextClassLoader(); - - public TestNodeSchemaLoader(String schemaFile, String errMsg) { - try { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/" + schemaFile).getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - fail("expect exceptions"); - } catch (Throwable e) { - assertTrue(e.getMessage().contains(errMsg)); - } - } - - @Rule - public Timeout testTimeout = new Timeout(30000); - - @Parameters - public static Collection getSchemaFiles() { - Object[][] schemaFiles = new Object[][]{ - {"enforce-error.xml", "layer without prefix defined"}, - {"invalid-cost.xml", "Cost should be positive number or 0"}, - {"multiple-leaf.xml", "Multiple LEAF layers are found"}, - {"multiple-root.xml", "Multiple ROOT layers are found"}, - {"no-leaf.xml", "No LEAF layer is found"}, - {"no-root.xml", "No ROOT layer is found"}, - {"path-layers-size-mismatch.xml", - "Topology path depth doesn't match layer element numbers"}, - {"path-with-id-reference-failure.xml", - "No layer found for id"}, - {"unknown-layer-type.xml", "Unsupported layer type"}, - {"wrong-path-order-1.xml", - "Topology path doesn't start with ROOT layer"}, - {"wrong-path-order-2.xml", "Topology path doesn't end with LEAF layer"}, - {"no-topology.xml", "no or multiple element"}, - {"multiple-topology.xml", "no or multiple element"}, - {"invalid-version.xml", "Bad layoutversion value"}, - }; - return Arrays.asList(schemaFiles); - } - - @Test - public void testGood() { - try { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - } catch (Throwable e) { - fail("should succeed"); - } - } - - @Test - public void testNotExist() { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath() + ".backup"; - try { - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - fail("should fail"); - } catch (Throwable e) { - assertTrue(e.getMessage().contains("not found")); - } - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java deleted file mode 100644 index 6698043727649..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_NODEGROUP; -import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_RACK; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** Test the node schema loader. */ -public class TestNodeSchemaManager { - private static final Logger LOG = - LoggerFactory.getLogger(TestNodeSchemaManager.class); - private ClassLoader classLoader = - Thread.currentThread().getContextClassLoader(); - private NodeSchemaManager manager; - private Configuration conf; - - public TestNodeSchemaManager() { - conf = new Configuration(); - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath(); - conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath); - manager = NodeSchemaManager.getInstance(); - manager.init(conf); - } - - @Rule - public Timeout testTimeout = new Timeout(30000); - - @Test(expected = IllegalArgumentException.class) - public void testFailure1() { - manager.getCost(0); - } - - @Test(expected = IllegalArgumentException.class) - public void testFailure2() { - manager.getCost(manager.getMaxLevel() + 1); - } - - @Test - public void testPass() { - assertEquals(4, manager.getMaxLevel()); - for (int i = 1; i <= manager.getMaxLevel(); i++) { - assertTrue(manager.getCost(i) == 1 || manager.getCost(i) == 0); - } - } - - @Test - public void testInitFailure() { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath() + ".backup"; - conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath); - try { - manager.init(conf); - fail("should fail"); - } catch (Throwable e) { - assertTrue(e.getMessage().contains("Failed to load schema file:" + - filePath)); - } - } - - @Test - public void testComplete() { - // successful complete action - String path = "/node1"; - assertEquals(DEFAULT_RACK + DEFAULT_NODEGROUP + path, - manager.complete(path)); - assertEquals("/rack" + DEFAULT_NODEGROUP + path, - manager.complete("/rack" + path)); - assertEquals(DEFAULT_RACK + "/nodegroup" + path, - manager.complete("/nodegroup" + path)); - - // failed complete action - assertEquals(null, manager.complete("/dc" + path)); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java deleted file mode 100644 index c38bf388363cf..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Arrays; -import java.util.Collection; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** Test the node schema loader. */ -@RunWith(Parameterized.class) -public class TestYamlSchemaLoader { - private static final Logger LOG = - LoggerFactory.getLogger(TestYamlSchemaLoader.class); - private ClassLoader classLoader = - Thread.currentThread().getContextClassLoader(); - - public TestYamlSchemaLoader(String schemaFile, String errMsg) { - try { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/" + schemaFile).getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - fail("expect exceptions"); - } catch (Throwable e) { - assertTrue(e.getMessage().contains(errMsg)); - } - } - - @Rule - public Timeout testTimeout = new Timeout(30000); - - @Parameters - public static Collection getSchemaFiles() { - Object[][] schemaFiles = new Object[][]{ - {"multiple-root.yaml", "Multiple root"}, - {"middle-leaf.yaml", "Leaf node in the middle"}, - }; - return Arrays.asList(schemaFiles); - } - - - @Test - public void testGood() { - try { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.yaml").getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - } catch (Throwable e) { - fail("should succeed"); - } - } - - @Test - public void testNotExist() { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.yaml").getPath() + ".backup"; - try { - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - fail("should fail"); - } catch (Throwable e) { - assertTrue(e.getMessage().contains("not found")); - } - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index 796694171f486..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; -/** - Test cases for SCM client classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java deleted file mode 100644 index 77a2cecd79a25..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java +++ /dev/null @@ -1,313 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.security.token; - -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.File; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.Signature; -import java.security.SignatureException; -import java.security.cert.Certificate; -import java.security.cert.CertificateEncodingException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; -import java.util.Map; -import javax.crypto.KeyGenerator; -import javax.crypto.Mac; -import javax.crypto.SecretKey; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.After; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Test class for OzoneManagerDelegationToken. 
- */ -public class TestOzoneBlockTokenIdentifier { - - private static final Logger LOG = LoggerFactory - .getLogger(TestOzoneBlockTokenIdentifier.class); - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneBlockTokenIdentifier.class.getSimpleName()); - private static final String KEYSTORES_DIR = - new File(BASEDIR).getAbsolutePath(); - private static long expiryTime; - private static KeyPair keyPair; - private static X509Certificate cert; - - @BeforeClass - public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - expiryTime = Time.monotonicNow() + 60 * 60 * 24; - - // Create Ozone Master key pair. - keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - // Create Ozone Master certificate (SCM CA issued cert) and key store. - cert = KeyStoreTestUtil - .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA"); - } - - @After - public void cleanUp() throws Exception { - // KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir); - } - - @Test - public void testSignToken() throws GeneralSecurityException, IOException { - String keystore = new File(KEYSTORES_DIR, "keystore.jks") - .getAbsolutePath(); - String truststore = new File(KEYSTORES_DIR, "truststore.jks") - .getAbsolutePath(); - String trustPassword = "trustPass"; - String keyStorePassword = "keyStorePass"; - String keyPassword = "keyPass"; - - - KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword, - "OzoneMaster", keyPair.getPrivate(), cert); - - // Create trust store and put the certificate in the trust store - Map certs = Collections.singletonMap("server", - cert); - KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs); - - // Sign the OzoneMaster Token with Ozone Master private key - PrivateKey privateKey = keyPair.getPrivate(); - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( - "testUser", "84940", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), 128L); - byte[] signedToken = signTokenAsymmetric(tokenId, privateKey); - - // Verify a valid signed OzoneMaster Token with Ozone Master - // public key(certificate) - boolean isValidToken = verifyTokenAsymmetric(tokenId, signedToken, cert); - LOG.info("{} is {}", tokenId, isValidToken ? "valid." 
: "invalid."); - - // Verify an invalid signed OzoneMaster Token with Ozone Master - // public key(certificate) - tokenId = new OzoneBlockTokenIdentifier("", "", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), 128L); - LOG.info("Unsigned token {} is {}", tokenId, - verifyTokenAsymmetric(tokenId, RandomUtils.nextBytes(128), cert)); - - } - - @Test - public void testTokenSerialization() throws GeneralSecurityException, - IOException { - String keystore = new File(KEYSTORES_DIR, "keystore.jks") - .getAbsolutePath(); - String truststore = new File(KEYSTORES_DIR, "truststore.jks") - .getAbsolutePath(); - String trustPassword = "trustPass"; - String keyStorePassword = "keyStorePass"; - String keyPassword = "keyPass"; - long maxLength = 128L; - - KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword, - "OzoneMaster", keyPair.getPrivate(), cert); - - // Create trust store and put the certificate in the trust store - Map certs = Collections.singletonMap("server", - cert); - KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs); - - // Sign the OzoneMaster Token with Ozone Master private key - PrivateKey privateKey = keyPair.getPrivate(); - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( - "testUser", "84940", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), maxLength); - byte[] signedToken = signTokenAsymmetric(tokenId, privateKey); - - - Token token = new Token(tokenId.getBytes(), - signedToken, tokenId.getKind(), new Text("host:port")); - - String encodeToUrlString = token.encodeToUrlString(); - - TokendecodedToken = new Token(); - decodedToken.decodeFromUrlString(encodeToUrlString); - - OzoneBlockTokenIdentifier decodedTokenId = new OzoneBlockTokenIdentifier(); - decodedTokenId.readFields(new DataInputStream( - new ByteArrayInputStream(decodedToken.getIdentifier()))); - - Assert.assertEquals(decodedTokenId, tokenId); - Assert.assertEquals(decodedTokenId.getMaxLength(), maxLength); - - // Verify a decoded signed Token with public key(certificate) - boolean isValidToken = verifyTokenAsymmetric(decodedTokenId, decodedToken - .getPassword(), cert); - LOG.info("{} is {}", tokenId, isValidToken ? "valid." 
: "invalid."); - } - - - public byte[] signTokenAsymmetric(OzoneBlockTokenIdentifier tokenId, - PrivateKey privateKey) throws NoSuchAlgorithmException, - InvalidKeyException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initSign(privateKey); - rsaSignature.update(tokenId.getBytes()); - byte[] signature = rsaSignature.sign(); - return signature; - } - - public boolean verifyTokenAsymmetric(OzoneBlockTokenIdentifier tokenId, - byte[] signature, Certificate certificate) throws InvalidKeyException, - NoSuchAlgorithmException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initVerify(certificate); - rsaSignature.update(tokenId.getBytes()); - boolean isValid = rsaSignature.verify(signature); - return isValid; - } - - private byte[] signTokenSymmetric(OzoneBlockTokenIdentifier identifier, - Mac mac, SecretKey key) { - try { - mac.init(key); - } catch (InvalidKeyException ike) { - throw new IllegalArgumentException("Invalid key to HMAC computation", - ike); - } - return mac.doFinal(identifier.getBytes()); - } - - OzoneBlockTokenIdentifier generateTestToken() { - return new OzoneBlockTokenIdentifier(RandomStringUtils.randomAlphabetic(6), - RandomStringUtils.randomAlphabetic(5), - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), 1024768L); - } - - @Test - public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException, - CertificateEncodingException, NoSuchProviderException, - InvalidKeyException, SignatureException { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - List tokenPasswordAsym = new ArrayList<>(); - for (int i = 0; i < testTokenCount; i++) { - tokenIds.add(generateTestToken()); - } - - KeyPair kp = KeyStoreTestUtil.generateKeyPair("RSA"); - - // Create Ozone Master certificate (SCM CA issued cert) and key store - X509Certificate certificate; - certificate = KeyStoreTestUtil.generateCertificate("CN=OzoneMaster", - kp, 30, "SHA256withRSA"); - - long startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - tokenPasswordAsym.add( - signTokenAsymmetric(tokenIds.get(i), kp.getPrivate())); - } - long duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns", - duration / testTokenCount); - - startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - verifyTokenAsymmetric(tokenIds.get(i), tokenPasswordAsym.get(i), - certificate); - } - duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token verify time with HmacSha256(RSA/1024 key) " - + "is {} ns", duration / testTokenCount); - } - - @Test - public void testSymmetricTokenPerf() { - String hmacSHA1 = "HmacSHA1"; - String hmacSHA256 = "HmacSHA256"; - - testSymmetricTokenPerfHelper(hmacSHA1, 64); - testSymmetricTokenPerfHelper(hmacSHA256, 1024); - } - - public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - List tokenPasswordSym = new ArrayList<>(); - for (int i = 0; i < testTokenCount; i++) { - tokenIds.add(generateTestToken()); - } - - KeyGenerator keyGen; - try { - keyGen = KeyGenerator.getInstance(hmacAlgorithm); - keyGen.init(keyLen); - } catch (NoSuchAlgorithmException nsa) { - throw new IllegalArgumentException("Can't find " + hmacAlgorithm + - " algorithm."); - } - - Mac mac; - try { - mac = 
Mac.getInstance(hmacAlgorithm); - } catch (NoSuchAlgorithmException nsa) { - throw new IllegalArgumentException("Can't find " + hmacAlgorithm + - " algorithm."); - } - - SecretKey secretKey = keyGen.generateKey(); - - long startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - tokenPasswordSym.add( - signTokenSymmetric(tokenIds.get(i), mac, secretKey)); - } - long duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token sign time with {}({} symmetric key) is {} ns", - hmacAlgorithm, keyLen, duration / testTokenCount); - } - - // TODO: verify certificate with a trust store - public boolean verifyCert(Certificate certificate) { - return true; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/package-info.java deleted file mode 100644 index d0566557b5401..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains the block token related classes. - */ -package org.apache.hadoop.hdds.security.token; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java deleted file mode 100644 index a8fa0af7b515f..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
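The removed token tests above exercise two JDK signing primitives: java.security.Signature with SHA256withRSA for the asymmetric path, and javax.crypto.Mac for the HMAC path. A minimal stand-alone sketch of both primitives, independent of the deleted Ozone token classes (the payload and key sizes below are arbitrary choices for illustration):

import java.nio.charset.StandardCharsets;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.Signature;
import javax.crypto.KeyGenerator;
import javax.crypto.Mac;
import javax.crypto.SecretKey;

public class SignVerifySketch {
  public static void main(String[] args) throws Exception {
    byte[] payload = "token-identifier-bytes".getBytes(StandardCharsets.UTF_8);

    // Asymmetric path: sign with an RSA private key, verify with the public key.
    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
    kpg.initialize(2048);
    KeyPair keyPair = kpg.generateKeyPair();

    Signature signer = Signature.getInstance("SHA256withRSA");
    signer.initSign(keyPair.getPrivate());
    signer.update(payload);
    byte[] rsaSignature = signer.sign();

    Signature verifier = Signature.getInstance("SHA256withRSA");
    verifier.initVerify(keyPair.getPublic());
    verifier.update(payload);
    System.out.println("RSA signature valid: " + verifier.verify(rsaSignature));

    // Symmetric path: MAC the same payload with an HMAC-SHA256 secret key.
    KeyGenerator keyGen = KeyGenerator.getInstance("HmacSHA256");
    SecretKey secretKey = keyGen.generateKey();
    Mac mac = Mac.getInstance("HmacSHA256");
    mac.init(secretKey);
    byte[] hmac = mac.doFinal(payload);
    System.out.println("HMAC length in bytes: " + hmac.length);
  }
}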
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; - -import java.io.IOException; -import java.security.PrivateKey; -import java.util.Date; -import java.util.concurrent.CompletableFuture; - -/** - * A test approver class that makes testing easier. - */ -public class MockApprover extends BaseApprover { - - public MockApprover(PKIProfile pkiProfile, SecurityConfig config) { - super(pkiProfile, config); - } - - @Override - public CompletableFuture - inspectCSR(PKCS10CertificationRequest csr) { - return super.inspectCSR(csr); - } - - @Override - public X509CertificateHolder sign(SecurityConfig config, PrivateKey caPrivate, - X509CertificateHolder caCertificate, - Date validFrom, Date validTill, - PKCS10CertificationRequest request, - String scmId, String clusterId) - throws IOException, OperatorCreationException { - return null; - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java deleted file mode 100644 index 1dea512e4c2c8..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.cert.X509Certificate; - -/** - * - */ -public class MockCAStore implements CertificateStore { - @Override - public void storeValidCertificate(BigInteger serialID, - X509Certificate certificate) - throws IOException { - - } - - @Override - public void revokeCertificate(BigInteger serialID) throws IOException { - - } - - @Override - public void removeExpiredCertificate(BigInteger serialID) - throws IOException { - - } - - @Override - public X509Certificate getCertificateByID(BigInteger serialID, - CertType certType) - throws IOException { - return null; - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java deleted file mode 100644 index 64eb4bafd681a..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.test.LambdaTestUtils; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.cert.CertificateException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.function.Consumer; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.junit.Assert.*; - -/** - * Tests the Default CA Server. 
- */ -public class TestDefaultCAServer { - private static OzoneConfiguration conf = new OzoneConfiguration(); - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - private MockCAStore caStore; - - @Before - public void init() throws IOException { - conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString()); - caStore = new MockCAStore(); - } - - @Test - public void testInit() throws SCMSecurityException, CertificateException, - IOException { - SecurityConfig securityConfig = new SecurityConfig(conf); - CertificateServer testCA = new DefaultCAServer("testCA", - RandomStringUtils.randomAlphabetic(4), - RandomStringUtils.randomAlphabetic(4), caStore); - testCA.init(securityConfig, CertificateServer.CAType.SELF_SIGNED_CA); - X509CertificateHolder first = testCA.getCACertificate(); - assertNotNull(first); - //Init is idempotent. - testCA.init(securityConfig, CertificateServer.CAType.SELF_SIGNED_CA); - X509CertificateHolder second = testCA.getCACertificate(); - assertEquals(first, second); - - // we only support Self Signed CA for now. - try { - testCA.init(securityConfig, CertificateServer.CAType.INTERMEDIARY_CA); - fail("code should not reach here, exception should have been thrown."); - } catch (IllegalStateException e) { - // This is a run time exception, hence it is not caught by the junit - // expected Exception. - assertTrue(e.toString().contains("Not implemented")); - } - } - - @Test - public void testMissingCertificate() { - SecurityConfig securityConfig = new SecurityConfig(conf); - CertificateServer testCA = new DefaultCAServer("testCA", - RandomStringUtils.randomAlphabetic(4), - RandomStringUtils.randomAlphabetic(4), caStore); - Consumer caInitializer = - ((DefaultCAServer) testCA).processVerificationStatus( - DefaultCAServer.VerificationStatus.MISSING_CERTIFICATE); - try { - - caInitializer.accept(securityConfig); - fail("code should not reach here, exception should have been thrown."); - } catch (IllegalStateException e) { - // This also is a runtime exception. Hence not caught by junit expected - // exception. - assertTrue(e.toString().contains("Missing Root Certs")); - } - } - - @Test - public void testMissingKey() { - SecurityConfig securityConfig = new SecurityConfig(conf); - CertificateServer testCA = new DefaultCAServer("testCA", - RandomStringUtils.randomAlphabetic(4), - RandomStringUtils.randomAlphabetic(4), caStore); - Consumer caInitializer = - ((DefaultCAServer) testCA).processVerificationStatus( - DefaultCAServer.VerificationStatus.MISSING_KEYS); - try { - - caInitializer.accept(securityConfig); - fail("code should not reach here, exception should have been thrown."); - } catch (IllegalStateException e) { - // This also is a runtime exception. Hence not caught by junit expected - // exception. - assertTrue(e.toString().contains("Missing Keys")); - } - } - - /** - * The most important test of this test suite. This tests that we are able - * to create a Test CA, creates it own self-Signed CA and then issue a - * certificate based on a CSR. - * @throws SCMSecurityException - on ERROR. - * @throws ExecutionException - on ERROR. - * @throws InterruptedException - on ERROR. - * @throws NoSuchProviderException - on ERROR. - * @throws NoSuchAlgorithmException - on ERROR. 
- */ - @Test - public void testRequestCertificate() throws IOException, - ExecutionException, InterruptedException, - NoSuchProviderException, NoSuchAlgorithmException { - String scmId = RandomStringUtils.randomAlphabetic(4); - String clusterId = RandomStringUtils.randomAlphabetic(4); - KeyPair keyPair = - new HDDSKeyGenerator(conf).generateKey(); - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("8.8.8.8") - .setCA(false) - .setClusterID(clusterId) - .setScmID(scmId) - .setSubject("Ozone Cluster") - .setConfiguration(conf) - .setKey(keyPair) - .build(); - - // Let us convert this to a string to mimic the common use case. - String csrString = CertificateSignRequest.getEncodedString(csr); - - CertificateServer testCA = new DefaultCAServer("testCA", - clusterId, scmId, caStore); - testCA.init(new SecurityConfig(conf), - CertificateServer.CAType.SELF_SIGNED_CA); - - Future holder = testCA.requestCertificate(csrString, - CertificateApprover.ApprovalType.TESTING_AUTOMATIC); - // Right now our calls are synchronous. Eventually this will have to wait. - assertTrue(holder.isDone()); - assertNotNull(holder.get()); - } - - /** - * Tests that we are able - * to create a Test CA, creates it own self-Signed CA and then issue a - * certificate based on a CSR when scmId and clusterId are not set in - * csr subject. - * @throws SCMSecurityException - on ERROR. - * @throws ExecutionException - on ERROR. - * @throws InterruptedException - on ERROR. - * @throws NoSuchProviderException - on ERROR. - * @throws NoSuchAlgorithmException - on ERROR. - */ - @Test - public void testRequestCertificateWithInvalidSubject() throws IOException, - ExecutionException, InterruptedException, - NoSuchProviderException, NoSuchAlgorithmException { - KeyPair keyPair = - new HDDSKeyGenerator(conf).generateKey(); - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("8.8.8.8") - .setCA(false) - .setSubject("Ozone Cluster") - .setConfiguration(conf) - .setKey(keyPair) - .build(); - - // Let us convert this to a string to mimic the common use case. - String csrString = CertificateSignRequest.getEncodedString(csr); - - CertificateServer testCA = new DefaultCAServer("testCA", - RandomStringUtils.randomAlphabetic(4), - RandomStringUtils.randomAlphabetic(4), caStore); - testCA.init(new SecurityConfig(conf), - CertificateServer.CAType.SELF_SIGNED_CA); - - Future holder = testCA.requestCertificate(csrString, - CertificateApprover.ApprovalType.TESTING_AUTOMATIC); - // Right now our calls are synchronous. Eventually this will have to wait. - assertTrue(holder.isDone()); - assertNotNull(holder.get()); - } - - @Test - public void testRequestCertificateWithInvalidSubjectFailure() - throws Exception { - KeyPair keyPair = - new HDDSKeyGenerator(conf).generateKey(); - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("8.8.8.8") - .setCA(false) - .setScmID("wrong one") - .setClusterID("223432rf") - .setSubject("Ozone Cluster") - .setConfiguration(conf) - .setKey(keyPair) - .build(); - - // Let us convert this to a string to mimic the common use case. 
- String csrString = CertificateSignRequest.getEncodedString(csr); - - CertificateServer testCA = new DefaultCAServer("testCA", - RandomStringUtils.randomAlphabetic(4), - RandomStringUtils.randomAlphabetic(4), caStore); - testCA.init(new SecurityConfig(conf), - CertificateServer.CAType.SELF_SIGNED_CA); - - LambdaTestUtils.intercept(ExecutionException.class, "ScmId and " + - "ClusterId in CSR subject are incorrect", - () -> { - Future holder = - testCA.requestCertificate(csrString, - CertificateApprover.ApprovalType.TESTING_AUTOMATIC); - holder.isDone(); - holder.get(); - }); - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultProfile.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultProfile.java deleted file mode 100644 index f892b8d05dfa3..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultProfile.java +++ /dev/null @@ -1,364 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
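The removed requestCertificate tests assert holder.isDone() immediately because the CA's signing path is currently synchronous even though it returns a Future. A small sketch of that API shape using the JDK's CompletableFuture (the issueCertificate method below is a hypothetical stand-in, not the removed DefaultCAServer code):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;

public class SynchronousFutureSketch {
  // A synchronous implementation can still expose a Future-shaped API by
  // returning an already-completed future, which is what the removed tests rely on.
  static Future<String> issueCertificate(String csr) {
    String signedCert = "signed:" + csr;          // placeholder for the real signing work
    return CompletableFuture.completedFuture(signedCert);
  }

  public static void main(String[] args) throws Exception {
    Future<String> holder = issueCertificate("example-csr");
    System.out.println("already done: " + holder.isDone());  // true for a completed future
    System.out.println(holder.get());                        // returns immediately
  }
}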
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x500.X500NameBuilder; -import org.bouncycastle.asn1.x500.style.BCStyle; -import org.bouncycastle.asn1.x509.ExtendedKeyUsage; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.Extensions; -import org.bouncycastle.asn1.x509.ExtensionsGenerator; -import org.bouncycastle.asn1.x509.GeneralName; -import org.bouncycastle.asn1.x509.GeneralNames; -import org.bouncycastle.asn1.x509.KeyPurposeId; -import org.bouncycastle.operator.ContentSigner; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.bouncycastle.pkcs.PKCS10CertificationRequestBuilder; -import org.bouncycastle.pkcs.PKCSException; -import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Tests for the default PKI Profile. - */ -public class TestDefaultProfile { - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - - private OzoneConfiguration configuration; - private SecurityConfig securityConfig; - private DefaultProfile defaultProfile; - private MockApprover testApprover; - private KeyPair keyPair; - - @Before - public void setUp() throws Exception { - configuration = new OzoneConfiguration(); - configuration.set(OZONE_METADATA_DIRS, - temporaryFolder.newFolder().toString()); - securityConfig = new SecurityConfig(configuration); - defaultProfile = new DefaultProfile(); - testApprover = new MockApprover(defaultProfile, - securityConfig); - keyPair = new HDDSKeyGenerator(securityConfig).generateKey(); - } - - /** - * Tests the General Names that we support. The default profile supports only - * two names right now. - */ - @Test - public void testisSupportedGeneralName() { -// Positive tests - assertTrue(defaultProfile.isSupportedGeneralName(GeneralName.iPAddress)); - assertTrue(defaultProfile.isSupportedGeneralName(GeneralName.dNSName)); -// Negative Tests - assertFalse(defaultProfile.isSupportedGeneralName( - GeneralName.directoryName)); - assertFalse(defaultProfile.isSupportedGeneralName(GeneralName.rfc822Name)); - assertFalse(defaultProfile.isSupportedGeneralName(GeneralName.otherName)); - } - - /** - * Test valid keys are validated correctly. - * - * @throws SCMSecurityException - on Error. - * @throws PKCSException - on Error. - * @throws OperatorCreationException - on Error. 
- */ - @Test - public void testVerifyCertificate() throws SCMSecurityException, - PKCSException, OperatorCreationException { - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("8.8.8.8") - .setCA(false) - .setClusterID("ClusterID") - .setScmID("SCMID") - .setSubject("Ozone Cluster") - .setConfiguration(configuration) - .setKey(keyPair) - .build(); - assertTrue(testApprover.verifyPkcs10Request(csr)); - } - - - - - /** - * Test invalid keys fail in the validation. - * - * @throws SCMSecurityException - on Error. - * @throws PKCSException - on Error. - * @throws OperatorCreationException - on Error. - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. - */ - @Test - public void testVerifyCertificateInvalidKeys() throws SCMSecurityException, - PKCSException, OperatorCreationException, - NoSuchProviderException, NoSuchAlgorithmException { - KeyPair newKeyPair = new HDDSKeyGenerator(securityConfig).generateKey(); - KeyPair wrongKey = new KeyPair(keyPair.getPublic(), - newKeyPair.getPrivate()); - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("8.8.8.8") - .setCA(false) - .setClusterID("ClusterID") - .setScmID("SCMID") - .setSubject("Ozone Cluster") - .setConfiguration(configuration) - .setKey(wrongKey) - .build(); - // Signature verification should fail here, since the public/private key - // does not match. - assertFalse(testApprover.verifyPkcs10Request(csr)); - } - - /** - * Tests that normal valid extensions work with the default profile. - * - * @throws SCMSecurityException - on Error. - * @throws PKCSException - on Error. - * @throws OperatorCreationException - on Error. - */ - @Test - public void testExtensions() throws SCMSecurityException { - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("192.10.234.6") - .setCA(false) - .setClusterID("ClusterID") - .setScmID("SCMID") - .setSubject("Ozone Cluster") - .setConfiguration(configuration) - .setKey(keyPair) - .build(); - assertTrue(testApprover.verfiyExtensions(csr)); - } - - /** - * Tests that invalid extensions cause a failure in validation. We will fail - * if CA extension is enabled. - * - * @throws SCMSecurityException - on Error. - */ - - @Test - public void testInvalidExtensionsWithCA() throws SCMSecurityException { - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("192.10.234.6") - .setCA(true) - .setClusterID("ClusterID") - .setScmID("SCMID") - .setSubject("Ozone Cluster") - .setConfiguration(configuration) - .setKey(keyPair) - .build(); - assertFalse(testApprover.verfiyExtensions(csr)); - } - - /** - * Tests that invalid extensions cause a failure in validation. We will fail - * if rfc222 type names are added, we also add the extension as both - * critical and non-critical fashion to verify that the we catch both cases. - * - * @throws SCMSecurityException - on Error. 
- */ - - @Test - public void testInvalidExtensionsWithEmail() - throws IOException, OperatorCreationException { - Extensions emailExtension = getSANExtension(GeneralName.rfc822Name, - "bilbo@apache.org", false); - PKCS10CertificationRequest csr = getInvalidCSR(keyPair, emailExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - - emailExtension = getSANExtension(GeneralName.rfc822Name, "bilbo" + - "@apache.org", true); - csr = getInvalidCSR(keyPair, emailExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - - } - - /** - * Same test for URI. - * @throws IOException - On Error. - * @throws OperatorCreationException- on Error. - */ - @Test - public void testInvalidExtensionsWithURI() throws IOException, - OperatorCreationException { - Extensions oExtension = getSANExtension( - GeneralName.uniformResourceIdentifier, "s3g.ozone.org", false); - PKCS10CertificationRequest csr = getInvalidCSR(keyPair, oExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - oExtension = getSANExtension(GeneralName.uniformResourceIdentifier, - "s3g.ozone.org", false); - csr = getInvalidCSR(keyPair, oExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - } - - /** - * Assert that if DNS is marked critical our PKI profile will reject it. - * @throws IOException - on Error. - * @throws OperatorCreationException - on Error. - */ - @Test - public void testInvalidExtensionsWithCriticalDNS() throws IOException, - OperatorCreationException { - Extensions dnsExtension = getSANExtension(GeneralName.dNSName, - "ozone.hadoop.org", - true); - PKCS10CertificationRequest csr = getInvalidCSR(keyPair, dnsExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - // This tests should pass, hence the assertTrue - dnsExtension = getSANExtension(GeneralName.dNSName, - "ozone.hadoop.org", - false); - csr = getInvalidCSR(keyPair, dnsExtension); - assertTrue(testApprover.verfiyExtensions(csr)); - } - - - /** - * Verify that valid Extended Key usage works as expected. - * @throws IOException - on Error. - * @throws OperatorCreationException - on Error. - */ - @Test - public void testValidExtendedKeyUsage() throws IOException, - OperatorCreationException { - Extensions extendedExtension = - getKeyUsageExtension(KeyPurposeId.id_kp_clientAuth, false); - PKCS10CertificationRequest csr = getInvalidCSR(keyPair, extendedExtension); - assertTrue(testApprover.verfiyExtensions(csr)); - - extendedExtension = getKeyUsageExtension(KeyPurposeId.id_kp_serverAuth, - false); - csr = getInvalidCSR(keyPair, extendedExtension); - assertTrue(testApprover.verfiyExtensions(csr)); - } - - - /** - * Verify that Invalid Extended Key usage works as expected, that is rejected. - * @throws IOException - on Error. - * @throws OperatorCreationException - on Error. - */ - @Test - public void testInValidExtendedKeyUsage() throws IOException, - OperatorCreationException { - Extensions extendedExtension = - getKeyUsageExtension(KeyPurposeId.id_kp_clientAuth, true); - PKCS10CertificationRequest csr = getInvalidCSR(keyPair, extendedExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - - extendedExtension = getKeyUsageExtension(KeyPurposeId.id_kp_OCSPSigning, - false); - csr = getInvalidCSR(keyPair, extendedExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - } - - - - /** - * Generates an CSR with the extension specified. 
- * This function is used to get an Invalid CSR and test that PKI profile - * rejects these invalid extensions, Hence the function name, by itself it - * is a well formed CSR, but our PKI profile will treat it as invalid CSR. - * - * @param kPair - Key Pair. - * @return CSR - PKCS10CertificationRequest - * @throws OperatorCreationException - on Error. - */ - private PKCS10CertificationRequest getInvalidCSR(KeyPair kPair, - Extensions extensions) throws OperatorCreationException { - X500NameBuilder namebuilder = - new X500NameBuilder(X500Name.getDefaultStyle()); - namebuilder.addRDN(BCStyle.CN, "invalidCert"); - PKCS10CertificationRequestBuilder p10Builder = - new JcaPKCS10CertificationRequestBuilder(namebuilder.build(), - keyPair.getPublic()); - p10Builder.addAttribute(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest, - extensions); - JcaContentSignerBuilder csBuilder = - new JcaContentSignerBuilder(this.securityConfig.getSignatureAlgo()); - ContentSigner signer = csBuilder.build(keyPair.getPrivate()); - return p10Builder.build(signer); - } - - /** - * Generate an Extension with rfc822Name. - * @param extensionCode - Extension Code. - * @param value - email to be added to the certificate - * @param critical - boolean value that marks the extension as critical. - * @return - An Extension list with email address. - * @throws IOException - */ - private Extensions getSANExtension(int extensionCode, String value, - boolean critical) throws IOException { - GeneralName extn = new GeneralName(extensionCode, - value); - ExtensionsGenerator extensionsGenerator = new ExtensionsGenerator(); - extensionsGenerator.addExtension(Extension.subjectAlternativeName, critical, - new GeneralNames(extn)); - return extensionsGenerator.generate(); - } - - /** - * Returns a extension with Extended Key usage. - * @param purposeId - Usage that we want to encode. - * @param critical - makes the extension critical. - * @return Extensions. - */ - private Extensions getKeyUsageExtension(KeyPurposeId purposeId, - boolean critical) throws IOException { - ExtendedKeyUsage extendedKeyUsage = new ExtendedKeyUsage(purposeId); - ExtensionsGenerator extensionsGenerator = new ExtensionsGenerator(); - extensionsGenerator.addExtension( - Extension.extendedKeyUsage, critical, extendedKeyUsage); - return extensionsGenerator.generate(); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java deleted file mode 100644 index 1d20a78dcc433..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for Default CA. - */ -package org.apache.hadoop.hdds.security.x509.certificate.authority; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java deleted file mode 100644 index dcd9898cbeee0..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.bouncycastle.cert.X509CertificateHolder; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; - -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.cert.X509Certificate; -import java.util.Arrays; -import java.util.Collection; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.RECOVER; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS; -import static org.junit.Assert.assertTrue; - -/** - * Test class for {@link DefaultCertificateClient}. 
- */ -@RunWith(Parameterized.class) -@SuppressWarnings("visibilitymodifier") -public class TestCertificateClientInit { - - private KeyPair keyPair; - private String certSerialId = "3284792342234"; - private CertificateClient dnCertificateClient; - private CertificateClient omCertificateClient; - private HDDSKeyGenerator keyGenerator; - private Path metaDirPath; - private SecurityConfig securityConfig; - private KeyCodec dnKeyCodec; - private KeyCodec omKeyCodec; - private X509Certificate x509Certificate; - private final static String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME; - private final static String OM_COMPONENT = OMCertificateClient.COMPONENT_NAME; - - @Parameter - public boolean pvtKeyPresent; - @Parameter(1) - public boolean pubKeyPresent; - @Parameter(2) - public boolean certPresent; - @Parameter(3) - public InitResponse expectedResult; - - @Parameterized.Parameters - public static Collection initData() { - return Arrays.asList(new Object[][]{ - {false, false, false, GETCERT}, - {false, false, true, FAILURE}, - {false, true, false, FAILURE}, - {true, false, false, FAILURE}, - {false, true, true, FAILURE}, - {true, true, false, GETCERT}, - {true, false, true, SUCCESS}, - {true, true, true, SUCCESS}}); - } - - @Before - public void setUp() throws Exception { - OzoneConfiguration config = new OzoneConfiguration(); - final String path = GenericTestUtils - .getTempPath(UUID.randomUUID().toString()); - metaDirPath = Paths.get(path, "test"); - config.set(HDDS_METADATA_DIR_NAME, metaDirPath.toString()); - securityConfig = new SecurityConfig(config); - keyGenerator = new HDDSKeyGenerator(securityConfig); - keyPair = keyGenerator.generateKey(); - x509Certificate = getX509Certificate(); - certSerialId = x509Certificate.getSerialNumber().toString(); - dnCertificateClient = new DNCertificateClient(securityConfig, - certSerialId); - omCertificateClient = new OMCertificateClient(securityConfig, - certSerialId); - dnKeyCodec = new KeyCodec(securityConfig, DN_COMPONENT); - omKeyCodec = new KeyCodec(securityConfig, OM_COMPONENT); - - Files.createDirectories(securityConfig.getKeyLocation(DN_COMPONENT)); - Files.createDirectories(securityConfig.getKeyLocation(OM_COMPONENT)); - } - - @After - public void tearDown() { - dnCertificateClient = null; - omCertificateClient = null; - FileUtils.deleteQuietly(metaDirPath.toFile()); - } - - - @Test - public void testInitDatanode() throws Exception { - if (pvtKeyPresent) { - dnKeyCodec.writePrivateKey(keyPair.getPrivate()); - } else { - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(DN_COMPONENT).toString(), - securityConfig.getPrivateKeyFileName()).toFile()); - } - - if (pubKeyPresent) { - if (dnCertificateClient.getPublicKey() == null) { - dnKeyCodec.writePublicKey(keyPair.getPublic()); - } - } else { - FileUtils.deleteQuietly( - Paths.get(securityConfig.getKeyLocation(DN_COMPONENT).toString(), - securityConfig.getPublicKeyFileName()).toFile()); - } - - if (certPresent) { - CertificateCodec codec = new CertificateCodec(securityConfig, - DN_COMPONENT); - codec.writeCertificate(new X509CertificateHolder( - x509Certificate.getEncoded())); - } else { - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(DN_COMPONENT).toString(), - securityConfig.getCertificateFileName()).toFile()); - } - InitResponse response = dnCertificateClient.init(); - - assertTrue(response.equals(expectedResult)); - - if (!response.equals(FAILURE)) { - assertTrue(OzoneSecurityUtil.checkIfFileExist( - securityConfig.getKeyLocation(DN_COMPONENT), - 
securityConfig.getPrivateKeyFileName())); - assertTrue(OzoneSecurityUtil.checkIfFileExist( - securityConfig.getKeyLocation(DN_COMPONENT), - securityConfig.getPublicKeyFileName())); - } - } - - @Test - public void testInitOzoneManager() throws Exception { - if (pvtKeyPresent) { - omKeyCodec.writePrivateKey(keyPair.getPrivate()); - } else { - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(OM_COMPONENT).toString(), - securityConfig.getPrivateKeyFileName()).toFile()); - } - - if (pubKeyPresent) { - if (omCertificateClient.getPublicKey() == null) { - omKeyCodec.writePublicKey(keyPair.getPublic()); - } - } else { - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(OM_COMPONENT).toString(), - securityConfig.getPublicKeyFileName()).toFile()); - } - - if (certPresent) { - CertificateCodec codec = new CertificateCodec(securityConfig, - OM_COMPONENT); - codec.writeCertificate(new X509CertificateHolder( - x509Certificate.getEncoded())); - } else { - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(OM_COMPONENT).toString(), - securityConfig.getCertificateFileName()).toFile()); - } - InitResponse response = omCertificateClient.init(); - - if (pvtKeyPresent && pubKeyPresent & !certPresent) { - assertTrue(response.equals(RECOVER)); - } else { - assertTrue(response.equals(expectedResult)); - } - - if (!response.equals(FAILURE)) { - assertTrue(OzoneSecurityUtil.checkIfFileExist( - securityConfig.getKeyLocation(OM_COMPONENT), - securityConfig.getPrivateKeyFileName())); - assertTrue(OzoneSecurityUtil.checkIfFileExist( - securityConfig.getKeyLocation(OM_COMPONENT), - securityConfig.getPublicKeyFileName())); - } - } - - private X509Certificate getX509Certificate() throws Exception { - return KeyStoreTestUtil.generateCertificate( - "CN=Test", keyPair, 10, securityConfig.getSignatureAlgo()); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java deleted file mode 100644 index f389cdb6d22ea..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java +++ /dev/null @@ -1,480 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.bouncycastle.cert.X509CertificateHolder; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.Signature; -import java.security.cert.X509Certificate; -import java.util.UUID; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.RandomStringUtils; - - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; -import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getPEMEncodedString; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -/** - * Test class for {@link DefaultCertificateClient}. 
- */ -public class TestDefaultCertificateClient { - - private String certSerialId; - private X509Certificate x509Certificate; - private OMCertificateClient omCertClient; - private DNCertificateClient dnCertClient; - private HDDSKeyGenerator keyGenerator; - private Path omMetaDirPath; - private Path dnMetaDirPath; - private SecurityConfig omSecurityConfig; - private SecurityConfig dnSecurityConfig; - private final static String UTF = "UTF-8"; - private final static String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME; - private final static String OM_COMPONENT = OMCertificateClient.COMPONENT_NAME; - private KeyCodec omKeyCodec; - private KeyCodec dnKeyCodec; - - @Before - public void setUp() throws Exception { - OzoneConfiguration config = new OzoneConfiguration(); - config.setStrings(OZONE_SCM_NAMES, "localhost"); - config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 2); - final String omPath = GenericTestUtils - .getTempPath(UUID.randomUUID().toString()); - final String dnPath = GenericTestUtils - .getTempPath(UUID.randomUUID().toString()); - - omMetaDirPath = Paths.get(omPath, "test"); - dnMetaDirPath = Paths.get(dnPath, "test"); - - config.set(HDDS_METADATA_DIR_NAME, omMetaDirPath.toString()); - omSecurityConfig = new SecurityConfig(config); - config.set(HDDS_METADATA_DIR_NAME, dnMetaDirPath.toString()); - dnSecurityConfig = new SecurityConfig(config); - - - keyGenerator = new HDDSKeyGenerator(omSecurityConfig); - omKeyCodec = new KeyCodec(omSecurityConfig, OM_COMPONENT); - dnKeyCodec = new KeyCodec(dnSecurityConfig, DN_COMPONENT); - - Files.createDirectories(omSecurityConfig.getKeyLocation(OM_COMPONENT)); - Files.createDirectories(dnSecurityConfig.getKeyLocation(DN_COMPONENT)); - x509Certificate = generateX509Cert(null); - certSerialId = x509Certificate.getSerialNumber().toString(); - getCertClient(); - } - - private void getCertClient() { - omCertClient = new OMCertificateClient(omSecurityConfig, certSerialId); - dnCertClient = new DNCertificateClient(dnSecurityConfig, certSerialId); - } - - @After - public void tearDown() { - omCertClient = null; - dnCertClient = null; - FileUtils.deleteQuietly(omMetaDirPath.toFile()); - FileUtils.deleteQuietly(dnMetaDirPath.toFile()); - } - - /** - * Tests: 1. getPrivateKey 2. getPublicKey 3. storePrivateKey 4. 
- * storePublicKey - */ - @Test - public void testKeyOperations() throws Exception { - cleanupOldKeyPair(); - PrivateKey pvtKey = omCertClient.getPrivateKey(); - PublicKey publicKey = omCertClient.getPublicKey(); - assertNull(publicKey); - assertNull(pvtKey); - - KeyPair keyPair = generateKeyPairFiles(); - pvtKey = omCertClient.getPrivateKey(); - assertNotNull(pvtKey); - assertEquals(pvtKey, keyPair.getPrivate()); - - publicKey = dnCertClient.getPublicKey(); - assertNotNull(publicKey); - assertEquals(publicKey, keyPair.getPublic()); - } - - private KeyPair generateKeyPairFiles() throws Exception { - cleanupOldKeyPair(); - KeyPair keyPair = keyGenerator.generateKey(); - omKeyCodec.writePrivateKey(keyPair.getPrivate()); - omKeyCodec.writePublicKey(keyPair.getPublic()); - - dnKeyCodec.writePrivateKey(keyPair.getPrivate()); - dnKeyCodec.writePublicKey(keyPair.getPublic()); - return keyPair; - } - - private void cleanupOldKeyPair() { - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPublicKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPublicKeyFileName()).toFile()); - } - - /** - * Tests: 1. storeCertificate 2. getCertificate 3. verifyCertificate - */ - @Test - public void testCertificateOps() throws Exception { - X509Certificate cert = omCertClient.getCertificate(); - assertNull(cert); - omCertClient.storeCertificate(getPEMEncodedString(x509Certificate), - true); - - cert = omCertClient.getCertificate( - x509Certificate.getSerialNumber().toString()); - assertNotNull(cert); - assertTrue(cert.getEncoded().length > 0); - assertEquals(cert, x509Certificate); - - // TODO: test verifyCertificate once implemented. - } - - private X509Certificate generateX509Cert(KeyPair keyPair) throws Exception { - if (keyPair == null) { - keyPair = generateKeyPairFiles(); - } - return KeyStoreTestUtil.generateCertificate("CN=Test", keyPair, 30, - omSecurityConfig.getSignatureAlgo()); - } - - @Test - public void testSignDataStream() throws Exception { - String data = RandomStringUtils.random(100, UTF); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPublicKeyFileName()).toFile()); - - // Expect error when there is no private key to sign. - LambdaTestUtils.intercept(IOException.class, "Error while " + - "signing the stream", - () -> omCertClient.signDataStream(IOUtils.toInputStream(data, - UTF))); - - generateKeyPairFiles(); - byte[] sign = omCertClient.signDataStream(IOUtils.toInputStream(data, - UTF)); - validateHash(sign, data.getBytes()); - } - - /** - * Validate hash using public key of KeyPair. 
- */ - private void validateHash(byte[] hash, byte[] data) - throws Exception { - Signature rsaSignature = - Signature.getInstance(omSecurityConfig.getSignatureAlgo(), - omSecurityConfig.getProvider()); - rsaSignature.initVerify(omCertClient.getPublicKey()); - rsaSignature.update(data); - Assert.assertTrue(rsaSignature.verify(hash)); - } - - /** - * Tests: 1. verifySignature - */ - @Test - public void verifySignatureStream() throws Exception { - String data = RandomStringUtils.random(500, UTF); - byte[] sign = omCertClient.signDataStream(IOUtils.toInputStream(data, - UTF)); - - // Positive tests. - assertTrue(omCertClient.verifySignature(data.getBytes(), sign, - x509Certificate)); - assertTrue(omCertClient.verifySignature(IOUtils.toInputStream(data, UTF), - sign, x509Certificate)); - - // Negative tests. - assertFalse(omCertClient.verifySignature(data.getBytes(), - "abc".getBytes(), x509Certificate)); - assertFalse(omCertClient.verifySignature(IOUtils.toInputStream(data, - UTF), "abc".getBytes(), x509Certificate)); - - } - - /** - * Tests: 1. verifySignature - */ - @Test - public void verifySignatureDataArray() throws Exception { - String data = RandomStringUtils.random(500, UTF); - byte[] sign = omCertClient.signData(data.getBytes()); - - // Positive tests. - assertTrue(omCertClient.verifySignature(data.getBytes(), sign, - x509Certificate)); - assertTrue(omCertClient.verifySignature(IOUtils.toInputStream(data, UTF), - sign, x509Certificate)); - - // Negative tests. - assertFalse(omCertClient.verifySignature(data.getBytes(), - "abc".getBytes(), x509Certificate)); - assertFalse(omCertClient.verifySignature(IOUtils.toInputStream(data, - UTF), "abc".getBytes(), x509Certificate)); - - } - - @Test - public void queryCertificate() throws Exception { - LambdaTestUtils.intercept(UnsupportedOperationException.class, - "Operation not supported", - () -> omCertClient.queryCertificate("")); - } - - @Test - public void testCertificateLoadingOnInit() throws Exception { - KeyPair keyPair = keyGenerator.generateKey(); - X509Certificate cert1 = generateX509Cert(keyPair); - X509Certificate cert2 = generateX509Cert(keyPair); - X509Certificate cert3 = generateX509Cert(keyPair); - - Path certPath = dnSecurityConfig.getCertificateLocation(DN_COMPONENT); - CertificateCodec codec = new CertificateCodec(dnSecurityConfig, - DN_COMPONENT); - - // Certificate not found. - LambdaTestUtils.intercept(CertificateException.class, "Error while" + - " getting certificate", - () -> dnCertClient.getCertificate(cert1.getSerialNumber() - .toString())); - LambdaTestUtils.intercept(CertificateException.class, "Error while" + - " getting certificate", - () -> dnCertClient.getCertificate(cert2.getSerialNumber() - .toString())); - LambdaTestUtils.intercept(CertificateException.class, "Error while" + - " getting certificate", - () -> dnCertClient.getCertificate(cert3.getSerialNumber() - .toString())); - codec.writeCertificate(certPath, "1.crt", - getPEMEncodedString(cert1), true); - codec.writeCertificate(certPath, "2.crt", - getPEMEncodedString(cert2), true); - codec.writeCertificate(certPath, "3.crt", - getPEMEncodedString(cert3), true); - - // Re instantiate DN client which will load certificates from filesystem. 
- dnCertClient = new DNCertificateClient(dnSecurityConfig, certSerialId); - - assertNotNull(dnCertClient.getCertificate(cert1.getSerialNumber() - .toString())); - assertNotNull(dnCertClient.getCertificate(cert2.getSerialNumber() - .toString())); - assertNotNull(dnCertClient.getCertificate(cert3.getSerialNumber() - .toString())); - - } - - @Test - public void testStoreCertificate() throws Exception { - KeyPair keyPair = keyGenerator.generateKey(); - X509Certificate cert1 = generateX509Cert(keyPair); - X509Certificate cert2 = generateX509Cert(keyPair); - X509Certificate cert3 = generateX509Cert(keyPair); - - dnCertClient.storeCertificate(getPEMEncodedString(cert1), true); - dnCertClient.storeCertificate(getPEMEncodedString(cert2), true); - dnCertClient.storeCertificate(getPEMEncodedString(cert3), true); - - assertNotNull(dnCertClient.getCertificate(cert1.getSerialNumber() - .toString())); - assertNotNull(dnCertClient.getCertificate(cert2.getSerialNumber() - .toString())); - assertNotNull(dnCertClient.getCertificate(cert3.getSerialNumber() - .toString())); - } - - @Test - public void testInitCertAndKeypairValidationFailures() throws Exception { - - GenericTestUtils.LogCapturer dnClientLog = GenericTestUtils.LogCapturer - .captureLogs(dnCertClient.getLogger()); - GenericTestUtils.LogCapturer omClientLog = GenericTestUtils.LogCapturer - .captureLogs(omCertClient.getLogger()); - KeyPair keyPair = keyGenerator.generateKey(); - KeyPair keyPair2 = keyGenerator.generateKey(); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Case 1. Expect failure when keypair validation fails. - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPublicKeyFileName()).toFile()); - - - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPublicKeyFileName()).toFile()); - - omKeyCodec.writePrivateKey(keyPair.getPrivate()); - omKeyCodec.writePublicKey(keyPair2.getPublic()); - - dnKeyCodec.writePrivateKey(keyPair.getPrivate()); - dnKeyCodec.writePublicKey(keyPair2.getPublic()); - - - // Check for DN. - assertEquals(dnCertClient.init(), FAILURE); - assertTrue(dnClientLog.getOutput().contains("Keypair validation " + - "failed")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Check for OM. - assertEquals(omCertClient.init(), FAILURE); - assertTrue(omClientLog.getOutput().contains("Keypair validation " + - "failed")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Case 2. Expect failure when certificate is generated from different - // private key and keypair validation fails. 
- getCertClient(); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getCertificateFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getCertificateFileName()).toFile()); - - CertificateCodec omCertCodec = new CertificateCodec(omSecurityConfig, - OM_COMPONENT); - omCertCodec.writeCertificate(new X509CertificateHolder( - x509Certificate.getEncoded())); - - CertificateCodec dnCertCodec = new CertificateCodec(dnSecurityConfig, - DN_COMPONENT); - dnCertCodec.writeCertificate(new X509CertificateHolder( - x509Certificate.getEncoded())); - // Check for DN. - assertEquals(dnCertClient.init(), FAILURE); - assertTrue(dnClientLog.getOutput().contains("Keypair validation " + - "failed")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Check for OM. - assertEquals(omCertClient.init(), FAILURE); - assertTrue(omClientLog.getOutput().contains("Keypair validation failed")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Case 3. Expect failure when certificate is generated from different - // private key and certificate validation fails. - - // Re write the correct public key. - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPublicKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPublicKeyFileName()).toFile()); - getCertClient(); - omKeyCodec.writePublicKey(keyPair.getPublic()); - dnKeyCodec.writePublicKey(keyPair.getPublic()); - - // Check for DN. - assertEquals(dnCertClient.init(), FAILURE); - assertTrue(dnClientLog.getOutput().contains("Stored certificate is " + - "generated with different")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - //Check for OM. - assertEquals(omCertClient.init(), FAILURE); - assertTrue(omClientLog.getOutput().contains("Stored certificate is " + - "generated with different")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Case 4. Failure when public key recovery fails. - getCertClient(); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPublicKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPublicKeyFileName()).toFile()); - - // Check for DN. - assertEquals(dnCertClient.init(), FAILURE); - assertTrue(dnClientLog.getOutput().contains("Can't recover public key")); - - // Check for OM. - assertEquals(omCertClient.init(), FAILURE); - assertTrue(omClientLog.getOutput().contains("Can't recover public key")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java deleted file mode 100644 index ded52068395ff..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.utils; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.bouncycastle.cert.X509CertificateHolder; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.time.LocalDate; -import java.time.temporal.ChronoUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -/** - * Tests the Certificate codecs. - */ -public class TestCertificateCodec { - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static final String COMPONENT = "test"; - private SecurityConfig securityConfig; - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - - @Before - public void init() throws IOException { - conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString()); - securityConfig = new SecurityConfig(conf); - } - - /** - * This test converts a X509Certificate Holder object to a PEM encoded String, - * then creates a new X509Certificate object to verify that we are able to - * serialize and deserialize correctly. we follow up with converting these - * objects to standard JCA x509Certificate objects. - * - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - * @throws CertificateException - on Error. 
- */ - @Test - public void testGetPEMEncodedString() - throws NoSuchProviderException, NoSuchAlgorithmException, - IOException, SCMSecurityException, CertificateException { - HDDSKeyGenerator keyGenerator = - new HDDSKeyGenerator(conf); - X509CertificateHolder cert = - SelfSignedCertificate.newBuilder() - .setSubject(RandomStringUtils.randomAlphabetic(4)) - .setClusterID(RandomStringUtils.randomAlphabetic(4)) - .setScmID(RandomStringUtils.randomAlphabetic(4)) - .setBeginDate(LocalDate.now()) - .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS)) - .setConfiguration(keyGenerator.getSecurityConfig() - .getConfiguration()) - .setKey(keyGenerator.generateKey()) - .makeCA() - .build(); - CertificateCodec codec = new CertificateCodec(securityConfig, COMPONENT); - String pemString = codec.getPEMEncodedString(cert); - assertTrue(pemString.startsWith(CertificateCodec.BEGIN_CERT)); - assertTrue(pemString.endsWith(CertificateCodec.END_CERT + "\n")); - - // Read back the certificate and verify that all the comparisons pass. - X509CertificateHolder newCert = - codec.getCertificateHolder(codec.getX509Certificate(pemString)); - assertEquals(cert, newCert); - - // Just make sure we can decode both these classes to Java Std. lIb classes. - X509Certificate firstCert = CertificateCodec.getX509Certificate(cert); - X509Certificate secondCert = CertificateCodec.getX509Certificate(newCert); - assertEquals(firstCert, secondCert); - } - - /** - * tests writing and reading certificates in PEM encoded form. - * - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - * @throws CertificateException - on Error. - */ - @Test - public void testwriteCertificate() throws NoSuchProviderException, - NoSuchAlgorithmException, IOException, SCMSecurityException, - CertificateException { - HDDSKeyGenerator keyGenerator = - new HDDSKeyGenerator(conf); - X509CertificateHolder cert = - SelfSignedCertificate.newBuilder() - .setSubject(RandomStringUtils.randomAlphabetic(4)) - .setClusterID(RandomStringUtils.randomAlphabetic(4)) - .setScmID(RandomStringUtils.randomAlphabetic(4)) - .setBeginDate(LocalDate.now()) - .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS)) - .setConfiguration(keyGenerator.getSecurityConfig() - .getConfiguration()) - .setKey(keyGenerator.generateKey()) - .makeCA() - .build(); - CertificateCodec codec = new CertificateCodec(securityConfig, COMPONENT); - String pemString = codec.getPEMEncodedString(cert); - File basePath = temporaryFolder.newFolder(); - if (!basePath.exists()) { - Assert.assertTrue(basePath.mkdirs()); - } - codec.writeCertificate(basePath.toPath(), "pemcertificate.crt", - pemString, false); - X509CertificateHolder certHolder = - codec.readCertificate(basePath.toPath(), "pemcertificate.crt"); - assertNotNull(certHolder); - assertEquals(cert.getSerialNumber(), certHolder.getSerialNumber()); - } - - /** - * Tests reading and writing certificates in DER form. - * - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - * @throws CertificateException - on Error. - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. 
- */ - @Test - public void testwriteCertificateDefault() - throws IOException, SCMSecurityException, CertificateException, - NoSuchProviderException, NoSuchAlgorithmException { - HDDSKeyGenerator keyGenerator = - new HDDSKeyGenerator(conf); - X509CertificateHolder cert = - SelfSignedCertificate.newBuilder() - .setSubject(RandomStringUtils.randomAlphabetic(4)) - .setClusterID(RandomStringUtils.randomAlphabetic(4)) - .setScmID(RandomStringUtils.randomAlphabetic(4)) - .setBeginDate(LocalDate.now()) - .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS)) - .setConfiguration(keyGenerator.getSecurityConfig() - .getConfiguration()) - .setKey(keyGenerator.generateKey()) - .makeCA() - .build(); - CertificateCodec codec = new CertificateCodec(securityConfig, COMPONENT); - codec.writeCertificate(cert); - X509CertificateHolder certHolder = codec.readCertificate(); - assertNotNull(certHolder); - assertEquals(cert.getSerialNumber(), certHolder.getSerialNumber()); - } - - /** - * Tests writing to non-default certificate file name. - * - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws CertificateException - on Error. - */ - @Test - public void writeCertificate2() throws IOException, SCMSecurityException, - NoSuchProviderException, NoSuchAlgorithmException, CertificateException { - HDDSKeyGenerator keyGenerator = - new HDDSKeyGenerator(conf); - X509CertificateHolder cert = - SelfSignedCertificate.newBuilder() - .setSubject(RandomStringUtils.randomAlphabetic(4)) - .setClusterID(RandomStringUtils.randomAlphabetic(4)) - .setScmID(RandomStringUtils.randomAlphabetic(4)) - .setBeginDate(LocalDate.now()) - .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS)) - .setConfiguration(keyGenerator.getSecurityConfig() - .getConfiguration()) - .setKey(keyGenerator.generateKey()) - .makeCA() - .build(); - CertificateCodec codec = - new CertificateCodec(keyGenerator.getSecurityConfig(), "ca"); - codec.writeCertificate(cert, "newcert.crt", false); - // Rewrite with force support - codec.writeCertificate(cert, "newcert.crt", true); - X509CertificateHolder x509CertificateHolder = - codec.readCertificate(codec.getLocation(), "newcert.crt"); - assertNotNull(x509CertificateHolder); - - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java deleted file mode 100644 index 4551f29ee242a..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - Tests for Certificate helpers. - */ -package org.apache.hadoop.hdds.security.x509.certificate.utils; - diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java deleted file mode 100644 index 5720d27b1618f..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.certificates; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; -import org.bouncycastle.asn1.ASN1Sequence; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.Extensions; -import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; -import org.bouncycastle.operator.ContentVerifierProvider; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentVerifierProviderBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.bouncycastle.pkcs.PKCSException; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - -/** - * Certificate Signing Request. 
- */ -public class TestCertificateSignRequest { - - private static OzoneConfiguration conf = new OzoneConfiguration(); - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - private SecurityConfig securityConfig; - - @Before - public void init() throws IOException { - conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString()); - securityConfig = new SecurityConfig(conf); - } - - @Test - public void testGenerateCSR() throws NoSuchProviderException, - NoSuchAlgorithmException, SCMSecurityException, - OperatorCreationException, PKCSException { - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "DN001"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - CertificateSignRequest.Builder builder = - new CertificateSignRequest.Builder() - .setSubject(subject) - .setScmID(scmID) - .setClusterID(clusterID) - .setKey(keyPair) - .setConfiguration(conf); - PKCS10CertificationRequest csr = builder.build(); - - // Check the Subject Name is in the expected format. - String dnName = String.format(SecurityUtil.getDistinguishedNameFormat(), - subject, scmID, clusterID); - Assert.assertEquals(csr.getSubject().toString(), dnName); - - // Verify the public key info match - byte[] encoded = keyPair.getPublic().getEncoded(); - SubjectPublicKeyInfo subjectPublicKeyInfo = - SubjectPublicKeyInfo.getInstance(ASN1Sequence.getInstance(encoded)); - SubjectPublicKeyInfo csrPublicKeyInfo = csr.getSubjectPublicKeyInfo(); - Assert.assertEquals(csrPublicKeyInfo, subjectPublicKeyInfo); - - // Verify CSR with attribute for extensions - Assert.assertEquals(1, csr.getAttributes().length); - Extensions extensions = SecurityUtil.getPkcs9Extensions(csr); - - // Verify key usage extension - Extension keyUsageExt = extensions.getExtension(Extension.keyUsage); - Assert.assertEquals(true, keyUsageExt.isCritical()); - - - // Verify San extension not set - Assert.assertEquals(null, - extensions.getExtension(Extension.subjectAlternativeName)); - - // Verify signature in CSR - ContentVerifierProvider verifierProvider = - new JcaContentVerifierProviderBuilder().setProvider(securityConfig - .getProvider()).build(csr.getSubjectPublicKeyInfo()); - Assert.assertEquals(true, csr.isSignatureValid(verifierProvider)); - } - - @Test - public void testGenerateCSRwithSan() throws NoSuchProviderException, - NoSuchAlgorithmException, SCMSecurityException, - OperatorCreationException, PKCSException { - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "DN001"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - CertificateSignRequest.Builder builder = - new CertificateSignRequest.Builder() - .setSubject(subject) - .setScmID(scmID) - .setClusterID(clusterID) - .setKey(keyPair) - .setConfiguration(conf); - - // Multi-home - builder.addIpAddress("192.168.1.1"); - builder.addIpAddress("192.168.2.1"); - - builder.addDnsName("dn1.abc.com"); - - PKCS10CertificationRequest csr = builder.build(); - - // Check the Subject Name is in the expected format. 
- String dnName = String.format(SecurityUtil.getDistinguishedNameFormat(), - subject, scmID, clusterID); - Assert.assertEquals(csr.getSubject().toString(), dnName); - - // Verify the public key info match - byte[] encoded = keyPair.getPublic().getEncoded(); - SubjectPublicKeyInfo subjectPublicKeyInfo = - SubjectPublicKeyInfo.getInstance(ASN1Sequence.getInstance(encoded)); - SubjectPublicKeyInfo csrPublicKeyInfo = csr.getSubjectPublicKeyInfo(); - Assert.assertEquals(csrPublicKeyInfo, subjectPublicKeyInfo); - - // Verify CSR with attribute for extensions - Assert.assertEquals(1, csr.getAttributes().length); - Extensions extensions = SecurityUtil.getPkcs9Extensions(csr); - - // Verify key usage extension - Extension sanExt = extensions.getExtension(Extension.keyUsage); - Assert.assertEquals(true, sanExt.isCritical()); - - - // Verify signature in CSR - ContentVerifierProvider verifierProvider = - new JcaContentVerifierProviderBuilder().setProvider(securityConfig - .getProvider()).build(csr.getSubjectPublicKeyInfo()); - Assert.assertEquals(true, csr.isSignatureValid(verifierProvider)); - } - - @Test - public void testGenerateCSRWithInvalidParams() throws NoSuchProviderException, - NoSuchAlgorithmException, SCMSecurityException { - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "DN001"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - CertificateSignRequest.Builder builder = - new CertificateSignRequest.Builder() - .setSubject(subject) - .setScmID(scmID) - .setClusterID(clusterID) - .setKey(keyPair) - .setConfiguration(conf); - - try { - builder.setKey(null); - builder.build(); - Assert.fail("Null Key should have failed."); - } catch (NullPointerException | IllegalArgumentException e) { - builder.setKey(keyPair); - } - - // Now try with blank/null Subject. - try { - builder.setSubject(null); - builder.build(); - Assert.fail("Null/Blank Subject should have thrown."); - } catch (IllegalArgumentException e) { - builder.setSubject(subject); - } - - try { - builder.setSubject(""); - builder.build(); - Assert.fail("Null/Blank Subject should have thrown."); - } catch (IllegalArgumentException e) { - builder.setSubject(subject); - } - - // Now try with invalid IP address - try { - builder.addIpAddress("255.255.255.*"); - builder.build(); - Assert.fail("Invalid ip address"); - } catch (IllegalArgumentException e) { - } - - PKCS10CertificationRequest csr = builder.build(); - - // Check the Subject Name is in the expected format. 
- String dnName = String.format(SecurityUtil.getDistinguishedNameFormat(), - subject, scmID, clusterID); - Assert.assertEquals(csr.getSubject().toString(), dnName); - - // Verify the public key info match - byte[] encoded = keyPair.getPublic().getEncoded(); - SubjectPublicKeyInfo subjectPublicKeyInfo = - SubjectPublicKeyInfo.getInstance(ASN1Sequence.getInstance(encoded)); - SubjectPublicKeyInfo csrPublicKeyInfo = csr.getSubjectPublicKeyInfo(); - Assert.assertEquals(csrPublicKeyInfo, subjectPublicKeyInfo); - - // Verify CSR with attribute for extensions - Assert.assertEquals(1, csr.getAttributes().length); - } - - @Test - public void testCsrSerialization() throws NoSuchProviderException, - NoSuchAlgorithmException, SCMSecurityException, IOException { - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "DN001"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - CertificateSignRequest.Builder builder = - new CertificateSignRequest.Builder() - .setSubject(subject) - .setScmID(scmID) - .setClusterID(clusterID) - .setKey(keyPair) - .setConfiguration(conf); - - PKCS10CertificationRequest csr = builder.build(); - byte[] csrBytes = csr.getEncoded(); - - // Verify de-serialized CSR matches with the original CSR - PKCS10CertificationRequest dsCsr = new PKCS10CertificationRequest(csrBytes); - Assert.assertEquals(csr, dsCsr); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java deleted file mode 100644 index 02d007864426f..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificates; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.SignatureException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.time.LocalDate; -import java.time.temporal.ChronoUnit; -import java.util.Date; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - -/** - * Test Class for Root Certificate generation. - */ -public class TestRootCertificate { - private static OzoneConfiguration conf = new OzoneConfiguration(); - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - private SecurityConfig securityConfig; - - @Before - public void init() throws IOException { - conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString()); - securityConfig = new SecurityConfig(conf); - } - - @Test - public void testAllFieldsAreExpected() - throws SCMSecurityException, NoSuchProviderException, - NoSuchAlgorithmException, CertificateException, - SignatureException, InvalidKeyException, IOException { - LocalDate notBefore = LocalDate.now(); - LocalDate notAfter = notBefore.plus(365, ChronoUnit.DAYS); - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "testRootCert"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - SelfSignedCertificate.Builder builder = - SelfSignedCertificate.newBuilder() - .setBeginDate(notBefore) - .setEndDate(notAfter) - .setClusterID(clusterID) - .setScmID(scmID) - .setSubject(subject) - .setKey(keyPair) - .setConfiguration(conf); - - X509CertificateHolder certificateHolder = builder.build(); - - //Assert that we indeed have a self signed certificate. - Assert.assertEquals(certificateHolder.getIssuer(), - certificateHolder.getSubject()); - - - // Make sure that NotBefore is before the current Date - Date invalidDate = java.sql.Date.valueOf( - notBefore.minus(1, ChronoUnit.DAYS)); - Assert.assertFalse( - certificateHolder.getNotBefore() - .before(invalidDate)); - - //Make sure the end date is honored. - invalidDate = java.sql.Date.valueOf( - notAfter.plus(1, ChronoUnit.DAYS)); - Assert.assertFalse( - certificateHolder.getNotAfter() - .after(invalidDate)); - - // Check the Subject Name and Issuer Name is in the expected format. 
- String dnName = String.format(SelfSignedCertificate.getNameFormat(), - subject, scmID, clusterID); - Assert.assertEquals(certificateHolder.getIssuer().toString(), dnName); - Assert.assertEquals(certificateHolder.getSubject().toString(), dnName); - - // We did not ask for this Certificate to be a CertificateServer - // certificate, hence that - // extension should be null. - Assert.assertNull( - certificateHolder.getExtension(Extension.basicConstraints)); - - // Extract the Certificate and verify that certificate matches the public - // key. - X509Certificate cert = - new JcaX509CertificateConverter().getCertificate(certificateHolder); - cert.verify(keyPair.getPublic()); - } - - @Test - public void testCACert() - throws SCMSecurityException, NoSuchProviderException, - NoSuchAlgorithmException, IOException { - LocalDate notBefore = LocalDate.now(); - LocalDate notAfter = notBefore.plus(365, ChronoUnit.DAYS); - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "testRootCert"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - SelfSignedCertificate.Builder builder = - SelfSignedCertificate.newBuilder() - .setBeginDate(notBefore) - .setEndDate(notAfter) - .setClusterID(clusterID) - .setScmID(scmID) - .setSubject(subject) - .setKey(keyPair) - .setConfiguration(conf) - .makeCA(); - - X509CertificateHolder certificateHolder = builder.build(); - // This time we asked for a CertificateServer Certificate, make sure that - // extension is - // present and valid. - Extension basicExt = - certificateHolder.getExtension(Extension.basicConstraints); - - Assert.assertNotNull(basicExt); - Assert.assertTrue(basicExt.isCritical()); - - // Since this code assigns ONE for the root certificate, we check if the - // serial number is the expected number. - Assert.assertEquals(certificateHolder.getSerialNumber(), BigInteger.ONE); - } - - @Test - public void testInvalidParamFails() - throws SCMSecurityException, NoSuchProviderException, - NoSuchAlgorithmException, IOException { - LocalDate notBefore = LocalDate.now(); - LocalDate notAfter = notBefore.plus(365, ChronoUnit.DAYS); - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "testRootCert"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - SelfSignedCertificate.Builder builder = - SelfSignedCertificate.newBuilder() - .setBeginDate(notBefore) - .setEndDate(notAfter) - .setClusterID(clusterID) - .setScmID(scmID) - .setSubject(subject) - .setConfiguration(conf) - .setKey(keyPair) - .makeCA(); - try { - builder.setKey(null); - builder.build(); - Assert.fail("Null Key should have failed."); - } catch (NullPointerException | IllegalArgumentException e) { - builder.setKey(keyPair); - } - - // Now try with Blank Subject. 
- try { - builder.setSubject(""); - builder.build(); - Assert.fail("Null/Blank Subject should have thrown."); - } catch (IllegalArgumentException e) { - builder.setSubject(subject); - } - - // Now try with blank/null SCM ID - try { - builder.setScmID(null); - builder.build(); - Assert.fail("Null/Blank SCM ID should have thrown."); - } catch (IllegalArgumentException e) { - builder.setScmID(scmID); - } - - - // Now try with blank/null SCM ID - try { - builder.setClusterID(null); - builder.build(); - Assert.fail("Null/Blank Cluster ID should have thrown."); - } catch (IllegalArgumentException e) { - builder.setClusterID(clusterID); - } - - - // Swap the Begin and End Date and verify that we cannot create a - // certificate like that. - try { - builder.setBeginDate(notAfter); - builder.setEndDate(notBefore); - builder.build(); - Assert.fail("Illegal dates should have thrown."); - } catch (IllegalArgumentException e) { - builder.setBeginDate(notBefore); - builder.setEndDate(notAfter); - } - - try { - KeyPair newKey = keyGen.generateKey(); - KeyPair wrongKey = new KeyPair(newKey.getPublic(), keyPair.getPrivate()); - builder.setKey(wrongKey); - X509CertificateHolder certificateHolder = builder.build(); - X509Certificate cert = - new JcaX509CertificateConverter().getCertificate(certificateHolder); - cert.verify(wrongKey.getPublic()); - Assert.fail("Invalid Key, should have thrown."); - } catch (SCMSecurityException | CertificateException - | SignatureException | InvalidKeyException e) { - builder.setKey(keyPair); - } - // Assert that we can create a certificate with all sane params. - Assert.assertNotNull(builder.build()); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/package-info.java deleted file mode 100644 index fffe1e5bf9781..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Test classes for Certificate utilities. 
- */ -package org.apache.hadoop.hdds.security.x509.certificate.utils; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java deleted file mode 100644 index 08761f48e881f..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.keys; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PublicKey; -import java.security.interfaces.RSAPublicKey; -import java.security.spec.PKCS8EncodedKeySpec; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -/** - * Test class for HDDS Key Generator. - */ -public class TestHDDSKeyGenerator { - private SecurityConfig config; - - @Before - public void init() { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, GenericTestUtils.getTempPath("testpath")); - config = new SecurityConfig(conf); - } - /** - * In this test we verify that we are able to create a key pair, then get - * bytes of that and use ASN1. parser to parse it back to a private key. - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - */ - @Test - public void testGenerateKey() - throws NoSuchProviderException, NoSuchAlgorithmException { - HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - Assert.assertEquals(config.getKeyAlgo(), - keyPair.getPrivate().getAlgorithm()); - PKCS8EncodedKeySpec keySpec = - new PKCS8EncodedKeySpec(keyPair.getPrivate().getEncoded()); - Assert.assertEquals("PKCS#8", keySpec.getFormat()); - } - - /** - * In this test we assert that size that we specified is used for Key - * generation. - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. 
- */ - @Test - public void testGenerateKeyWithSize() throws NoSuchProviderException, - NoSuchAlgorithmException { - HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(4096); - PublicKey publicKey = keyPair.getPublic(); - if(publicKey instanceof RSAPublicKey) { - Assert.assertEquals(4096, - ((RSAPublicKey)(publicKey)).getModulus().bitLength()); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java deleted file mode 100644 index d82b02f43c5dc..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.keys; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; -import static org.junit.Assert.assertNotNull; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.PosixFilePermission; -import java.security.KeyFactory; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.spec.InvalidKeySpecException; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.X509EncodedKeySpec; -import java.util.Set; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -/** - * Test class for HDDS pem writer. 
- */ -public class TestKeyCodec { - - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - private OzoneConfiguration configuration; - private SecurityConfig securityConfig; - private String component; - private HDDSKeyGenerator keyGenerator; - private String prefix; - - @Before - public void init() throws IOException { - configuration = new OzoneConfiguration(); - prefix = temporaryFolder.newFolder().toString(); - configuration.set(HDDS_METADATA_DIR_NAME, prefix); - keyGenerator = new HDDSKeyGenerator(configuration); - securityConfig = new SecurityConfig(configuration); - component = "test_component"; - } - - /** - * Assert basic things like we are able to create a file, and the names are - * in expected format etc. - * - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - * @throws IOException - On I/O failure. - */ - @Test - public void testWriteKey() - throws NoSuchProviderException, NoSuchAlgorithmException, - IOException, InvalidKeySpecException { - KeyPair keys = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, component); - pemWriter.writeKey(keys); - - // Assert that locations have been created. - Path keyLocation = pemWriter.getSecurityConfig().getKeyLocation(component); - Assert.assertTrue(keyLocation.toFile().exists()); - - // Assert that locations are created in the locations that we specified - // using the Config. - Assert.assertTrue(keyLocation.toString().startsWith(prefix)); - Path privateKeyPath = Paths.get(keyLocation.toString(), - pemWriter.getSecurityConfig().getPrivateKeyFileName()); - Assert.assertTrue(privateKeyPath.toFile().exists()); - Path publicKeyPath = Paths.get(keyLocation.toString(), - pemWriter.getSecurityConfig().getPublicKeyFileName()); - Assert.assertTrue(publicKeyPath.toFile().exists()); - - // Read the private key and test if the expected String in the PEM file - // format exists. - byte[] privateKey = Files.readAllBytes(privateKeyPath); - String privateKeydata = new String(privateKey, StandardCharsets.UTF_8); - Assert.assertTrue(privateKeydata.contains("PRIVATE KEY")); - - // Read the public key and test if the expected String in the PEM file - // format exists. - byte[] publicKey = Files.readAllBytes(publicKeyPath); - String publicKeydata = new String(publicKey, StandardCharsets.UTF_8); - Assert.assertTrue(publicKeydata.contains("PUBLIC KEY")); - - // Let us decode the PEM file and parse it back into binary. - KeyFactory kf = KeyFactory.getInstance( - pemWriter.getSecurityConfig().getKeyAlgo()); - - // Replace the PEM Human readable guards. - privateKeydata = - privateKeydata.replace("-----BEGIN PRIVATE KEY-----\n", ""); - privateKeydata = - privateKeydata.replace("-----END PRIVATE KEY-----", ""); - - // Decode the bas64 to binary format and then use an ASN.1 parser to - // parse the binary format. - - byte[] keyBytes = Base64.decodeBase64(privateKeydata); - PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(keyBytes); - PrivateKey privateKeyDecoded = kf.generatePrivate(spec); - assertNotNull("Private Key should not be null", - privateKeyDecoded); - - // Let us decode the public key and veriy that we can parse it back into - // binary. 
- publicKeydata = - publicKeydata.replace("-----BEGIN PUBLIC KEY-----\n", ""); - publicKeydata = - publicKeydata.replace("-----END PUBLIC KEY-----", ""); - - keyBytes = Base64.decodeBase64(publicKeydata); - X509EncodedKeySpec pubKeyspec = new X509EncodedKeySpec(keyBytes); - PublicKey publicKeyDecoded = kf.generatePublic(pubKeyspec); - assertNotNull("Public Key should not be null", - publicKeyDecoded); - - // Now let us assert the permissions on the Directories and files are as - // expected. - Set expectedSet = pemWriter.getPermissionSet(); - Set currentSet = - Files.getPosixFilePermissions(privateKeyPath); - currentSet.removeAll(expectedSet); - Assert.assertEquals(0, currentSet.size()); - - currentSet = - Files.getPosixFilePermissions(publicKeyPath); - currentSet.removeAll(expectedSet); - Assert.assertEquals(0, currentSet.size()); - - currentSet = - Files.getPosixFilePermissions(keyLocation); - currentSet.removeAll(expectedSet); - Assert.assertEquals(0, currentSet.size()); - } - - /** - * Assert key rewrite fails without force option. - * - * @throws IOException - on I/O failure. - */ - @Test - public void testReWriteKey() - throws Exception { - KeyPair kp = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, component); - SecurityConfig secConfig = pemWriter.getSecurityConfig(); - pemWriter.writeKey(kp); - - // Assert that rewriting of keys throws exception with valid messages. - LambdaTestUtils - .intercept(IOException.class, "Private Key file already exists.", - () -> pemWriter.writeKey(kp)); - FileUtils.deleteQuietly(Paths.get( - secConfig.getKeyLocation(component).toString() + "/" + secConfig - .getPrivateKeyFileName()).toFile()); - LambdaTestUtils - .intercept(IOException.class, "Public Key file already exists.", - () -> pemWriter.writeKey(kp)); - FileUtils.deleteQuietly(Paths.get( - secConfig.getKeyLocation(component).toString() + "/" + secConfig - .getPublicKeyFileName()).toFile()); - - // Should succeed now as both public and private key are deleted. - pemWriter.writeKey(kp); - // Should succeed with overwrite flag as true. - pemWriter.writeKey(kp, true); - - } - - /** - * Assert key rewrite fails in non Posix file system. - * - * @throws IOException - on I/O failure. - */ - @Test - public void testWriteKeyInNonPosixFS() - throws Exception { - KeyPair kp = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, component); - pemWriter.setIsPosixFileSystem(() -> false); - - // Assert key rewrite fails in non Posix file system. 
- LambdaTestUtils - .intercept(IOException.class, "Unsupported File System for pem file.", - () -> pemWriter.writeKey(kp)); - } - - @Test - public void testReadWritePublicKeywithoutArgs() - throws NoSuchProviderException, NoSuchAlgorithmException, IOException, - InvalidKeySpecException { - - KeyPair kp = keyGenerator.generateKey(); - KeyCodec keycodec = new KeyCodec(securityConfig, component); - keycodec.writeKey(kp); - - PublicKey pubKey = keycodec.readPublicKey(); - assertNotNull(pubKey); - - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java deleted file mode 100644 index 49e40b4774aaa..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Test package for keys used in X.509 env. - */ -package org.apache.hadoop.hdds.security.x509.keys; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java deleted file mode 100644 index f5414686a251c..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * X.509 Certificate and keys related tests. 
- */ -package org.apache.hadoop.hdds.security.x509; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java deleted file mode 100644 index 10724ab7c28bc..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import io.jaegertracing.internal.JaegerSpanContext; -import io.jaegertracing.internal.exceptions.EmptyTracerStateStringException; -import io.jaegertracing.internal.exceptions.MalformedTracerStateStringException; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertTrue; - -class TestStringCodec { - - @Test - void testExtract() throws Exception { - StringCodec codec = new StringCodec(); - - LambdaTestUtils.intercept(EmptyTracerStateStringException.class, - () -> codec.extract(null)); - - StringBuilder sb = new StringBuilder().append("123"); - LambdaTestUtils.intercept(MalformedTracerStateStringException.class, - "String does not match tracer state format", - () -> codec.extract(sb)); - - sb.append(":456:789"); - LambdaTestUtils.intercept(MalformedTracerStateStringException.class, - "String does not match tracer state format", - () -> codec.extract(sb)); - sb.append(":66"); - JaegerSpanContext context = codec.extract(sb); - String expectedContextString = new String("123:456:789:66"); - assertTrue(context.getTraceId().equals("123")); - assertTrue(context.toString().equals(expectedContextString)); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java deleted file mode 100644 index 18e1200b4f529..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; -/** - Test cases for ozone tracing. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java deleted file mode 100644 index 11d0fad55eeca..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import org.apache.hadoop.hdds.HddsIdFactory; -import org.junit.After; -import static org.junit.Assert.assertEquals; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test the JMX interface for the rocksdb metastore implementation. - */ -public class TestHddsIdFactory { - - private static final Set ID_SET = ConcurrentHashMap.newKeySet(); - private static final int IDS_PER_THREAD = 10000; - private static final int NUM_OF_THREADS = 5; - - @After - public void cleanup() { - ID_SET.clear(); - } - - @Test - public void testGetLongId() throws Exception { - - ExecutorService executor = Executors.newFixedThreadPool(5); - List> tasks = new ArrayList<>(5); - addTasks(tasks); - List> result = executor.invokeAll(tasks); - assertEquals(IDS_PER_THREAD * NUM_OF_THREADS, ID_SET.size()); - for (Future r : result) { - assertEquals(IDS_PER_THREAD, r.get().intValue()); - } - } - - private void addTasks(List> tasks) { - for (int i = 0; i < NUM_OF_THREADS; i++) { - Callable task = () -> { - for (int idNum = 0; idNum < IDS_PER_THREAD; idNum++) { - long var = HddsIdFactory.getLongId(); - if (ID_SET.contains(var)) { - Assert.fail("Duplicate id found"); - } - ID_SET.add(var); - } - return IDS_PER_THREAD; - }; - tasks.add(task); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java deleted file mode 100644 index d24fcf5c3b8b6..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java +++ /dev/null @@ -1,590 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.utils; - -import com.google.common.collect.Lists; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.tuple.ImmutablePair; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.DFSUtilClient; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter; - -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.slf4j.event.Level; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.runners.Parameterized.Parameters; - -/** - * Test class for ozone metadata store. - */ -@RunWith(Parameterized.class) -public class TestMetadataStore { - - private final static int MAX_GETRANGE_LENGTH = 100; - private final String storeImpl; - @Rule - public ExpectedException expectedException = ExpectedException.none(); - private MetadataStore store; - private File testDir; - - public TestMetadataStore(String metadataImpl) { - this.storeImpl = metadataImpl; - } - - @Parameters - public static Collection data() { - return Arrays.asList(new Object[][] { - {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB}, - {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB} - }); - } - - @Before - public void init() throws IOException { - if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) { - // The initialization of RocksDB fails on Windows - assumeNotWindows(); - } - - testDir = GenericTestUtils.getTestDir(getClass().getSimpleName() - + "-" + storeImpl.toLowerCase()); - - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - - store = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setCreateIfMissing(true) - .setDbFile(testDir) - .build(); - - // Add 20 entries. 
- // {a0 : a-value0} to {a9 : a-value9} - // {b0 : b-value0} to {b9 : b-value9} - for (int i = 0; i < 10; i++) { - store.put(getBytes("a" + i), getBytes("a-value" + i)); - store.put(getBytes("b" + i), getBytes("b-value" + i)); - } - } - - @Test - public void testIterator() throws Exception { - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - File dbDir = GenericTestUtils.getRandomizedTestDir(); - MetadataStore dbStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setCreateIfMissing(true) - .setDbFile(dbDir) - .build(); - - //As database is empty, check whether iterator is working as expected or - // not. - MetaStoreIterator metaStoreIterator = - dbStore.iterator(); - assertFalse(metaStoreIterator.hasNext()); - try { - metaStoreIterator.next(); - fail("testIterator failed"); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Store has no more elements", - ex); - } - - for (int i = 0; i < 10; i++) { - store.put(getBytes("a" + i), getBytes("a-value" + i)); - } - - metaStoreIterator = dbStore.iterator(); - - int i = 0; - while (metaStoreIterator.hasNext()) { - MetadataStore.KeyValue val = metaStoreIterator.next(); - assertEquals("a" + i, getString(val.getKey())); - assertEquals("a-value" + i, getString(val.getValue())); - i++; - } - - // As we have iterated all the keys in database, hasNext should return - // false and next() should throw NoSuchElement exception. - - assertFalse(metaStoreIterator.hasNext()); - try { - metaStoreIterator.next(); - fail("testIterator failed"); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Store has no more elements", - ex); - } - dbStore.close(); - dbStore.destroy(); - FileUtils.deleteDirectory(dbDir); - - } - - @Test - public void testMetaStoreConfigDifferentFromType() throws IOException { - - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - String dbType; - GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG); - GenericTestUtils.LogCapturer logCapturer = - GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG); - if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) { - dbType = "RocksDB"; - } else { - dbType = "LevelDB"; - } - - File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() - + "-" + dbType.toLowerCase() + "-test"); - MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(dbDir).setDBType(dbType).build(); - assertTrue(logCapturer.getOutput().contains("Using dbType " + dbType + "" + - " for metastore")); - dbStore.close(); - dbStore.destroy(); - FileUtils.deleteDirectory(dbDir); - - } - - @Test - public void testdbTypeNotSet() throws IOException { - - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG); - GenericTestUtils.LogCapturer logCapturer = - GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG); - - File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() - + "-" + storeImpl.toLowerCase() + "-test"); - MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(dbDir).build(); - assertTrue(logCapturer.getOutput().contains("dbType is null, using dbType" + - " " + storeImpl)); - dbStore.close(); - dbStore.destroy(); - 
FileUtils.deleteDirectory(dbDir); - - } - - @After - public void cleanup() throws IOException { - if (store != null) { - store.close(); - store.destroy(); - } - if (testDir != null) { - FileUtils.deleteDirectory(testDir); - } - } - - private byte[] getBytes(String str) { - return str == null ? null : - DFSUtilClient.string2Bytes(str); - } - - private String getString(byte[] bytes) { - return bytes == null ? null : - DFSUtilClient.bytes2String(bytes); - } - - @Test - public void testGetDelete() throws IOException { - for (int i = 0; i < 10; i++) { - byte[] va = store.get(getBytes("a" + i)); - assertEquals("a-value" + i, getString(va)); - - byte[] vb = store.get(getBytes("b" + i)); - assertEquals("b-value" + i, getString(vb)); - } - - String keyToDel = "del-" + UUID.randomUUID().toString(); - store.put(getBytes(keyToDel), getBytes(keyToDel)); - assertEquals(keyToDel, getString(store.get(getBytes(keyToDel)))); - store.delete(getBytes(keyToDel)); - assertEquals(null, store.get(getBytes(keyToDel))); - } - - @Test - public void testPeekFrom() throws IOException { - // Test peek from an element that has prev as well as next - testPeek("a3", "a2", "a4"); - - // Test peek from an element that only has prev - testPeek("b9", "b8", null); - - // Test peek from an element that only has next - testPeek("a0", null, "a1"); - } - - private String getExpectedValue(String key) { - if (key == null) { - return null; - } - char[] arr = key.toCharArray(); - return new StringBuilder().append(arr[0]).append("-value") - .append(arr[arr.length - 1]).toString(); - } - - private void testPeek(String peekKey, String prevKey, String nextKey) - throws IOException { - // Look for current - String k = null; - String v = null; - ImmutablePair current = - store.peekAround(0, getBytes(peekKey)); - if (current != null) { - k = getString(current.getKey()); - v = getString(current.getValue()); - } - assertEquals(peekKey, k); - assertEquals(v, getExpectedValue(peekKey)); - - // Look for prev - k = null; - v = null; - ImmutablePair prev = - store.peekAround(-1, getBytes(peekKey)); - if (prev != null) { - k = getString(prev.getKey()); - v = getString(prev.getValue()); - } - assertEquals(prevKey, k); - assertEquals(v, getExpectedValue(prevKey)); - - // Look for next - k = null; - v = null; - ImmutablePair next = - store.peekAround(1, getBytes(peekKey)); - if (next != null) { - k = getString(next.getKey()); - v = getString(next.getValue()); - } - assertEquals(nextKey, k); - assertEquals(v, getExpectedValue(nextKey)); - } - - @Test - public void testIterateKeys() throws IOException { - // iterate keys from b0 - ArrayList result = Lists.newArrayList(); - store.iterate(getBytes("b0"), (k, v) -> { - // b-value{i} - String value = getString(v); - char num = value.charAt(value.length() - 1); - // each value adds 1 - int i = Character.getNumericValue(num) + 1; - value = value.substring(0, value.length() - 1) + i; - result.add(value); - return true; - }); - - assertFalse(result.isEmpty()); - for (int i = 0; i < result.size(); i++) { - assertEquals("b-value" + (i + 1), result.get(i)); - } - - // iterate from a non exist key - result.clear(); - store.iterate(getBytes("xyz"), (k, v) -> { - result.add(getString(v)); - return true; - }); - assertTrue(result.isEmpty()); - - // iterate from the beginning - result.clear(); - store.iterate(null, (k, v) -> { - result.add(getString(v)); - return true; - }); - assertEquals(20, result.size()); - } - - @Test - public void testGetRangeKVs() throws IOException { - List> result = null; - - // Set empty 
startKey will return values from beginning. - result = store.getRangeKVs(null, 5); - assertEquals(5, result.size()); - assertEquals("a-value2", getString(result.get(2).getValue())); - - // Empty list if startKey doesn't exist. - result = store.getRangeKVs(getBytes("a12"), 5); - assertEquals(0, result.size()); - - // Returns max available entries after a valid startKey. - result = store.getRangeKVs(getBytes("b0"), MAX_GETRANGE_LENGTH); - assertEquals(10, result.size()); - assertEquals("b0", getString(result.get(0).getKey())); - assertEquals("b-value0", getString(result.get(0).getValue())); - result = store.getRangeKVs(getBytes("b0"), 5); - assertEquals(5, result.size()); - - // Both startKey and count are honored. - result = store.getRangeKVs(getBytes("a9"), 2); - assertEquals(2, result.size()); - assertEquals("a9", getString(result.get(0).getKey())); - assertEquals("a-value9", getString(result.get(0).getValue())); - assertEquals("b0", getString(result.get(1).getKey())); - assertEquals("b-value0", getString(result.get(1).getValue())); - - // Filter keys by prefix. - // It should returns all "b*" entries. - MetadataKeyFilter filter1 = new KeyPrefixFilter().addFilter("b"); - result = store.getRangeKVs(null, 100, filter1); - assertEquals(10, result.size()); - assertTrue(result.stream().allMatch(entry -> - new String(entry.getKey(), UTF_8).startsWith("b") - )); - assertEquals(20, filter1.getKeysScannedNum()); - assertEquals(10, filter1.getKeysHintedNum()); - result = store.getRangeKVs(null, 3, filter1); - assertEquals(3, result.size()); - result = store.getRangeKVs(getBytes("b3"), 1, filter1); - assertEquals("b-value3", getString(result.get(0).getValue())); - - // Define a customized filter that filters keys by suffix. - // Returns all "*2" entries. - MetadataKeyFilter filter2 = (preKey, currentKey, nextKey) - -> getString(currentKey).endsWith("2"); - result = store.getRangeKVs(null, MAX_GETRANGE_LENGTH, filter2); - assertEquals(2, result.size()); - assertEquals("a2", getString(result.get(0).getKey())); - assertEquals("b2", getString(result.get(1).getKey())); - result = store.getRangeKVs(null, 1, filter2); - assertEquals(1, result.size()); - assertEquals("a2", getString(result.get(0).getKey())); - - // Apply multiple filters. - result = store.getRangeKVs(null, MAX_GETRANGE_LENGTH, filter1, filter2); - assertEquals(1, result.size()); - assertEquals("b2", getString(result.get(0).getKey())); - assertEquals("b-value2", getString(result.get(0).getValue())); - - // If filter is null, no effect. - result = store.getRangeKVs(null, 1, (MetadataKeyFilter[]) null); - assertEquals(1, result.size()); - assertEquals("a0", getString(result.get(0).getKey())); - } - - @Test - public void testGetSequentialRangeKVs() throws IOException { - MetadataKeyFilter suffixFilter = (preKey, currentKey, nextKey) - -> DFSUtil.bytes2String(currentKey).endsWith("2"); - // Suppose to return a2 and b2 - List> result = - store.getRangeKVs(null, MAX_GETRANGE_LENGTH, suffixFilter); - assertEquals(2, result.size()); - assertEquals("a2", DFSUtil.bytes2String(result.get(0).getKey())); - assertEquals("b2", DFSUtil.bytes2String(result.get(1).getKey())); - - // Suppose to return just a2, because when it iterates to a3, - // the filter no long matches and it should stop from there. 
- result = store.getSequentialRangeKVs(null, - MAX_GETRANGE_LENGTH, suffixFilter); - assertEquals(1, result.size()); - assertEquals("a2", DFSUtil.bytes2String(result.get(0).getKey())); - } - - @Test - public void testGetRangeLength() throws IOException { - List> result = null; - - result = store.getRangeKVs(null, 0); - assertEquals(0, result.size()); - - result = store.getRangeKVs(null, 1); - assertEquals(1, result.size()); - - // Count less than zero is invalid. - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("Invalid count given"); - store.getRangeKVs(null, -1); - } - - @Test - public void testInvalidStartKey() throws IOException { - // If startKey is invalid, the returned list should be empty. - List> kvs = - store.getRangeKVs(getBytes("unknownKey"), MAX_GETRANGE_LENGTH); - assertEquals(0, kvs.size()); - } - - @Test - public void testDestroyDB() throws IOException { - // create a new DB to test db destroy - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - - File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() - + "-" + storeImpl.toLowerCase() + "-toDestroy"); - MetadataStore dbStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setCreateIfMissing(true) - .setDbFile(dbDir) - .build(); - - dbStore.put(getBytes("key1"), getBytes("value1")); - dbStore.put(getBytes("key2"), getBytes("value2")); - - assertFalse(dbStore.isEmpty()); - assertTrue(dbDir.exists()); - assertTrue(dbDir.listFiles().length > 0); - - dbStore.destroy(); - - assertFalse(dbDir.exists()); - } - - @Test - public void testBatchWrite() throws IOException { - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - - File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() - + "-" + storeImpl.toLowerCase() + "-batchWrite"); - MetadataStore dbStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setCreateIfMissing(true) - .setDbFile(dbDir) - .build(); - - List expectedResult = Lists.newArrayList(); - for (int i = 0; i < 10; i++) { - dbStore.put(getBytes("batch-" + i), getBytes("batch-value-" + i)); - expectedResult.add("batch-" + i); - } - - BatchOperation batch = new BatchOperation(); - batch.delete(getBytes("batch-2")); - batch.delete(getBytes("batch-3")); - batch.delete(getBytes("batch-4")); - batch.put(getBytes("batch-new-2"), getBytes("batch-new-value-2")); - - expectedResult.remove("batch-2"); - expectedResult.remove("batch-3"); - expectedResult.remove("batch-4"); - expectedResult.add("batch-new-2"); - - dbStore.writeBatch(batch); - - Iterator it = expectedResult.iterator(); - AtomicInteger count = new AtomicInteger(0); - dbStore.iterate(null, (key, value) -> { - count.incrementAndGet(); - return it.hasNext() && it.next().equals(getString(key)); - }); - - assertEquals(8, count.get()); - } - - @Test - public void testKeyPrefixFilter() throws IOException { - List> result = null; - RuntimeException exception = null; - - try { - new KeyPrefixFilter().addFilter("b0", true).addFilter("b"); - } catch (IllegalArgumentException e) { - exception = e; - assertTrue(exception.getMessage().contains("KeyPrefix: b already " + - "rejected")); - } - - try { - new KeyPrefixFilter().addFilter("b0").addFilter("b", true); - } catch (IllegalArgumentException e) { - exception = e; - assertTrue(exception.getMessage().contains("KeyPrefix: b already " + - "accepted")); - } - - try { - new KeyPrefixFilter().addFilter("b", 
true).addFilter("b0"); - } catch (IllegalArgumentException e) { - exception = e; - assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " + - "rejected")); - } - - try { - new KeyPrefixFilter().addFilter("b").addFilter("b0", true); - } catch (IllegalArgumentException e) { - exception = e; - assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " + - "accepted")); - } - - MetadataKeyFilter filter1 = new KeyPrefixFilter(true) - .addFilter("a0") - .addFilter("a1") - .addFilter("b", true); - result = store.getRangeKVs(null, 100, filter1); - assertEquals(2, result.size()); - assertTrue(result.stream().anyMatch(entry -> new String(entry.getKey(), - UTF_8) - .startsWith("a0")) && result.stream().anyMatch(entry -> new String( - entry.getKey(), UTF_8).startsWith("a1"))); - - filter1 = new KeyPrefixFilter(true).addFilter("b", true); - result = store.getRangeKVs(null, 100, filter1); - assertEquals(0, result.size()); - - filter1 = new KeyPrefixFilter().addFilter("b", true); - result = store.getRangeKVs(null, 100, filter1); - assertEquals(10, result.size()); - assertTrue(result.stream().allMatch(entry -> new String(entry.getKey(), - UTF_8) - .startsWith("a"))); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRetriableTask.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRetriableTask.java deleted file mode 100644 index 148ccf94a1ba7..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRetriableTask.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import static org.junit.Assert.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; - -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.junit.Test; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.zip.ZipException; - -/** - * Tests for {@link RetriableTask}. 
- */ -public class TestRetriableTask { - - @Test - public void returnsSuccessfulResult() throws Exception { - String result = "bilbo"; - RetriableTask task = new RetriableTask<>( - RetryPolicies.RETRY_FOREVER, "test", () -> result); - assertEquals(result, task.call()); - } - - @Test - public void returnsSuccessfulResultAfterFailures() throws Exception { - String result = "gandalf"; - AtomicInteger attempts = new AtomicInteger(); - RetriableTask task = new RetriableTask<>( - RetryPolicies.RETRY_FOREVER, "test", - () -> { - if (attempts.incrementAndGet() <= 2) { - throw new Exception("testing"); - } - return result; - }); - assertEquals(result, task.call()); - } - - @Test - public void respectsRetryPolicy() { - int expectedAttempts = 3; - AtomicInteger attempts = new AtomicInteger(); - RetryPolicy retryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep( - expectedAttempts, 1, TimeUnit.MILLISECONDS); - RetriableTask task = new RetriableTask<>(retryPolicy, "thr", () -> { - attempts.incrementAndGet(); - throw new ZipException("testing"); - }); - - IOException e = assertThrows(IOException.class, task::call); - assertEquals(ZipException.class, e.getCause().getClass()); - assertEquals(expectedAttempts, attempts.get()); - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java deleted file mode 100644 index 29c780304cbb5..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java +++ /dev/null @@ -1,234 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.metrics2.AbstractMetric; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.MetricsTag; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import javax.management.MBeanServer; -import java.io.File; -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.util.HashMap; -import java.util.Map; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Test the JMX interface for the rocksdb metastore implementation. - */ -public class TestRocksDBStoreMBean { - - private Configuration conf; - - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB); - } - - - @Test - public void testJmxBeans() throws Exception { - - RocksDBStore metadataStore = getTestRocksDBStoreWithData(); - - MBeanServer platformMBeanServer = - ManagementFactory.getPlatformMBeanServer(); - Thread.sleep(2000); - - Object keysWritten = platformMBeanServer - .getAttribute(metadataStore.getStatMBeanName(), "NUMBER_KEYS_WRITTEN"); - - assertEquals(10L, keysWritten); - - Object dbWriteAverage = platformMBeanServer - .getAttribute(metadataStore.getStatMBeanName(), "DB_WRITE_AVERAGE"); - assertTrue((double) dbWriteAverage > 0); - - metadataStore.close(); - - } - - @Test() - public void testDisabledStat() throws Exception { - File testDir = GenericTestUtils - .getTestDir(getClass().getSimpleName() + "-withoutstat"); - - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS, - OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF); - - RocksDBStore metadataStore = - (RocksDBStore) MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(testDir).build(); - - Assert.assertNull(metadataStore.getStatMBeanName()); - } - - @Test - public void testMetricsSystemIntegration() throws Exception { - - RocksDBStore metadataStore = getTestRocksDBStoreWithData(); - Thread.sleep(2000); - - MetricsSystem ms = DefaultMetricsSystem.instance(); - MetricsSource rdbSource = - ms.getSource("Rocksdb_TestRocksDBStoreMBean-withstat"); - - BufferedMetricsCollector metricsCollector = new BufferedMetricsCollector(); - rdbSource.getMetrics(metricsCollector, true); - - Map metrics = metricsCollector.getMetricsRecordBuilder() - .getMetrics(); - assertTrue(10.0 == metrics.get("NUMBER_KEYS_WRITTEN")); - assertTrue(metrics.get("DB_WRITE_AVERAGE") > 0); - metadataStore.close(); - } - - private RocksDBStore getTestRocksDBStoreWithData() throws 
IOException { - File testDir = - GenericTestUtils.getTestDir(getClass().getSimpleName() + "-withstat"); - - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL"); - - RocksDBStore metadataStore = - (RocksDBStore) MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(testDir).build(); - - for (int i = 0; i < 10; i++) { - metadataStore.put("key".getBytes(UTF_8), "value".getBytes(UTF_8)); - } - - return metadataStore; - } -} - -/** - * Test class to buffer a single MetricsRecordBuilder instance. - */ -class BufferedMetricsCollector implements MetricsCollector { - - private BufferedMetricsRecordBuilderImpl metricsRecordBuilder; - - BufferedMetricsCollector() { - metricsRecordBuilder = new BufferedMetricsRecordBuilderImpl(); - } - - public BufferedMetricsRecordBuilderImpl getMetricsRecordBuilder() { - return metricsRecordBuilder; - } - - @Override - public MetricsRecordBuilder addRecord(String s) { - metricsRecordBuilder.setContext(s); - return metricsRecordBuilder; - } - - @Override - public MetricsRecordBuilder addRecord(MetricsInfo metricsInfo) { - return metricsRecordBuilder; - } - - /** - * Test class to buffer a single snapshot of metrics. - */ - class BufferedMetricsRecordBuilderImpl extends MetricsRecordBuilder { - - private Map metrics = new HashMap<>(); - private String contextName; - - public Map getMetrics() { - return metrics; - } - - @Override - public MetricsRecordBuilder tag(MetricsInfo metricsInfo, String s) { - return null; - } - - @Override - public MetricsRecordBuilder add(MetricsTag metricsTag) { - return null; - } - - @Override - public MetricsRecordBuilder add(AbstractMetric abstractMetric) { - return null; - } - - @Override - public MetricsRecordBuilder setContext(String s) { - this.contextName = s; - return this; - } - - @Override - public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, int i) { - return null; - } - - @Override - public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, long l) { - metrics.put(metricsInfo.name(), (double)l); - return this; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, int i) { - return null; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, long l) { - return null; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, float v) { - return null; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, double v) { - metrics.put(metricsInfo.name(), v); - return this; - } - - @Override - public MetricsCollector parent() { - return null; - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java deleted file mode 100644 index 4ba54e98fcabb..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdfs.DFSUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.RocksDB; - -import java.io.File; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.apache.hadoop.hdds.utils.db.DBConfigFromFile.getOptionsFileNameFromDB; - -/** - * DBConf tests. - */ -public class TestDBConfigFromFile { - private final static String DB_FILE = "test.db"; - private final static String INI_FILE = getOptionsFileNameFromDB(DB_FILE); - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Before - public void setUp() throws Exception { - System.setProperty(DBConfigFromFile.CONFIG_DIR, - folder.newFolder().toString()); - ClassLoader classLoader = getClass().getClassLoader(); - File testData = new File(classLoader.getResource(INI_FILE).getFile()); - File dest = Paths.get( - System.getProperty(DBConfigFromFile.CONFIG_DIR), INI_FILE).toFile(); - FileUtils.copyFile(testData, dest); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void readFromFile() throws IOException { - final List families = - Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - "First", "Second", "Third", - "Fourth", "Fifth", - "Sixth"); - final List columnFamilyDescriptors = - new ArrayList<>(); - for (String family : families) { - columnFamilyDescriptors.add( - new ColumnFamilyDescriptor(family.getBytes(StandardCharsets.UTF_8), - new ColumnFamilyOptions())); - } - - final DBOptions options = DBConfigFromFile.readFromFile(DB_FILE, - columnFamilyDescriptors); - - // Some Random Values Defined in the test.db.ini, we verify that we are - // able to get values that are defined in the test.db.ini. 
- Assert.assertNotNull(options); - Assert.assertEquals(551615L, options.maxManifestFileSize()); - Assert.assertEquals(1000L, options.keepLogFileNum()); - Assert.assertEquals(1048576, options.writableFileMaxBufferSize()); - } - - @Test - public void readFromFileInvalidConfig() throws IOException { - final List families = - Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - "First", "Second", "Third", - "Fourth", "Fifth", - "Sixth"); - final List columnFamilyDescriptors = - new ArrayList<>(); - for (String family : families) { - columnFamilyDescriptors.add( - new ColumnFamilyDescriptor(family.getBytes(StandardCharsets.UTF_8), - new ColumnFamilyOptions())); - } - - final DBOptions options = DBConfigFromFile.readFromFile("badfile.db.ini", - columnFamilyDescriptors); - - // This has to return a Null, since we have config defined for badfile.db - Assert.assertNull(options); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java deleted file mode 100644 index d406060165f32..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; -import java.nio.charset.StandardCharsets; - -/** - * Tests RDBStore creation. 
- */ -public class TestDBStoreBuilder { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @Before - public void setUp() throws Exception { - System.setProperty(DBConfigFromFile.CONFIG_DIR, - folder.newFolder().toString()); - } - - @Test - public void builderWithoutAnyParams() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - thrown.expect(IOException.class); - DBStoreBuilder.newBuilder(conf).build(); - } - - @Test - public void builderWithOneParamV1() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - thrown.expect(IOException.class); - DBStoreBuilder.newBuilder(conf) - .setName("Test.db") - .build(); - } - - @Test - public void builderWithOneParamV2() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if(!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - thrown.expect(IOException.class); - DBStoreBuilder.newBuilder(conf) - .setPath(newFolder.toPath()) - .build(); - } - - @Test - public void builderWithOpenClose() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if(!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - DBStore dbStore = DBStoreBuilder.newBuilder(conf) - .setName("Test.db") - .setPath(newFolder.toPath()) - .build(); - // Nothing to do just open and Close. - dbStore.close(); - } - - @Test - public void builderWithDoubleTableName() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if(!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - thrown.expect(IOException.class); - DBStoreBuilder.newBuilder(conf) - .setName("Test.db") - .setPath(newFolder.toPath()) - .addTable("FIRST") - .addTable("FIRST") - .build(); - // Nothing to do , This will throw so we do not have to close. 
- - } - - @Test - public void builderWithDataWrites() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if(!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - try (DBStore dbStore = DBStoreBuilder.newBuilder(conf) - .setName("Test.db") - .setPath(newFolder.toPath()) - .addTable("First") - .addTable("Second") - .build()) { - try (Table firstTable = dbStore.getTable("First")) { - byte[] key = - RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); - firstTable.put(key, value); - byte[] temp = firstTable.get(key); - Assert.assertArrayEquals(value, temp); - } - - try (Table secondTable = dbStore.getTable("Second")) { - Assert.assertTrue(secondTable.isEmpty()); - } - } - } - - @Test - public void builderWithDiskProfileWrites() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if(!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - try (DBStore dbStore = DBStoreBuilder.newBuilder(conf) - .setName("Test.db") - .setPath(newFolder.toPath()) - .addTable("First") - .addTable("Second") - .setProfile(DBProfile.DISK) - .build()) { - try (Table firstTable = dbStore.getTable("First")) { - byte[] key = - RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); - firstTable.put(key, value); - byte[] temp = firstTable.get(key); - Assert.assertArrayEquals(value, temp); - } - - try (Table secondTable = dbStore.getTable("Second")) { - Assert.assertTrue(secondTable.isEmpty()); - } - } - } - - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java deleted file mode 100644 index 6084ae96cd5cc..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.utils.db; - -import javax.management.MBeanServer; - -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.commons.codec.binary.StringUtils; -import org.apache.hadoop.hdfs.DFSUtil; - -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; - -/** - * RDBStore Tests. - */ -public class TestRDBStore { - private final List families = - Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - "First", "Second", "Third", - "Fourth", "Fifth", - "Sixth"); - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - @Rule - public ExpectedException thrown = ExpectedException.none(); - private RDBStore rdbStore = null; - private DBOptions options = null; - private Set configSet; - - @Before - public void setUp() throws Exception { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.ALL); - options = options.setStatistics(statistics); - configSet = new HashSet<>(); - for(String name : families) { - TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions()); - configSet.add(newConfig); - } - rdbStore = new RDBStore(folder.newFolder(), options, configSet); - } - - @After - public void tearDown() throws Exception { - if (rdbStore != null) { - rdbStore.close(); - } - } - - private void insertRandomData(RDBStore dbStore, int familyIndex) - throws Exception { - try (Table firstTable = dbStore.getTable(families.get(familyIndex))) { - Assert.assertNotNull("Table cannot be null", firstTable); - for (int x = 0; x < 100; x++) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - firstTable.put(key, value); - } - } - } - - @Test - public void compactDB() throws Exception { - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - Assert.assertNotNull("DB Store cannot be null", newStore); - insertRandomData(newStore, 1); - // This test does not assert anything if there is any error this test - // will throw and fail. - newStore.compactDB(); - } - } - - @Test - public void close() throws Exception { - RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet); - Assert.assertNotNull("DBStore cannot be null", newStore); - // This test does not assert anything if there is any error this test - // will throw and fail. 
- newStore.close(); - } - - @Test - public void moveKey() throws Exception { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - - try (Table firstTable = rdbStore.getTable(families.get(1))) { - firstTable.put(key, value); - try (Table secondTable = rdbStore - .getTable(families.get(2))) { - rdbStore.move(key, firstTable, secondTable); - byte[] newvalue = secondTable.get(key); - // Make sure we have value in the second table - Assert.assertNotNull(newvalue); - //and it is same as what we wrote to the FirstTable - Assert.assertArrayEquals(value, newvalue); - } - // After move this key must not exist in the first table. - Assert.assertNull(firstTable.get(key)); - } - } - - @Test - public void moveWithValue() throws Exception { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - - byte[] nextValue = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - try (Table firstTable = rdbStore.getTable(families.get(1))) { - firstTable.put(key, value); - try (Table secondTable = rdbStore - .getTable(families.get(2))) { - rdbStore.move(key, nextValue, firstTable, secondTable); - byte[] newvalue = secondTable.get(key); - // Make sure we have value in the second table - Assert.assertNotNull(newvalue); - //and it is not same as what we wrote to the FirstTable, and equals - // the new value. - Assert.assertArrayEquals(nextValue, nextValue); - } - } - - } - - @Test - public void getEstimatedKeyCount() throws Exception { - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - Assert.assertNotNull("DB Store cannot be null", newStore); - - // Write 100 keys to the first table. - insertRandomData(newStore, 1); - - // Write 100 keys to the secondTable table. - insertRandomData(newStore, 2); - - // Let us make sure that our estimate is not off by 10% - Assert.assertTrue(newStore.getEstimatedKeyCount() > 180 - || newStore.getEstimatedKeyCount() < 220); - } - } - - @Test - public void getStatMBeanName() throws Exception { - - try (Table firstTable = rdbStore.getTable(families.get(1))) { - for (int y = 0; y < 100; y++) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - firstTable.put(key, value); - } - } - MBeanServer platformMBeanServer = - ManagementFactory.getPlatformMBeanServer(); - Thread.sleep(2000); - - Object keysWritten = platformMBeanServer - .getAttribute(rdbStore.getStatMBeanName(), "NUMBER_KEYS_WRITTEN"); - - Assert.assertTrue(((Long) keysWritten) >= 99L); - - Object dbWriteAverage = platformMBeanServer - .getAttribute(rdbStore.getStatMBeanName(), "DB_WRITE_AVERAGE"); - Assert.assertTrue((double) dbWriteAverage > 0); - } - - @Test - public void getTable() throws Exception { - for (String tableName : families) { - try (Table table = rdbStore.getTable(tableName)) { - Assert.assertNotNull(tableName + "is null", table); - } - } - thrown.expect(IOException.class); - rdbStore.getTable("ATableWithNoName"); - } - - @Test - public void listTables() throws Exception { - List
<Table>
tableList = rdbStore.listTables(); - Assert.assertNotNull("Table list cannot be null", tableList); - Map hashTable = new HashMap<>(); - - for (Table t : tableList) { - hashTable.put(t.getName(), t); - } - - int count = families.size(); - // Assert that we have all the tables in the list and no more. - for (String name : families) { - Assert.assertTrue(hashTable.containsKey(name)); - count--; - } - Assert.assertEquals(0, count); - } - - @Test - public void testRocksDBCheckpoint() throws Exception { - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - Assert.assertNotNull("DB Store cannot be null", newStore); - - insertRandomData(newStore, 1); - DBCheckpoint checkpoint = - newStore.getCheckpoint(true); - Assert.assertNotNull(checkpoint); - - RDBStore restoredStoreFromCheckPoint = - new RDBStore(checkpoint.getCheckpointLocation().toFile(), - options, configSet); - - // Let us make sure that our estimate is not off by 10% - Assert.assertTrue( - restoredStoreFromCheckPoint.getEstimatedKeyCount() > 90 - || restoredStoreFromCheckPoint.getEstimatedKeyCount() < 110); - checkpoint.cleanupCheckpoint(); - } - - } - - @Test - public void testRocksDBCheckpointCleanup() throws Exception { - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - Assert.assertNotNull("DB Store cannot be null", newStore); - - insertRandomData(newStore, 1); - DBCheckpoint checkpoint = - newStore.getCheckpoint(true); - Assert.assertNotNull(checkpoint); - - Assert.assertTrue(Files.exists( - checkpoint.getCheckpointLocation())); - checkpoint.cleanupCheckpoint(); - Assert.assertFalse(Files.exists( - checkpoint.getCheckpointLocation())); - } - } - - /** - * Not strictly a unit test. Just a confirmation of the expected behavior - * of RocksDB keyMayExist API. - * Expected behavior - On average, keyMayExist latency < key.get() latency - * for invalid keys. - * @throws Exception if unable to read from RocksDB. - */ - @Test - public void testRocksDBKeyMayExistApi() throws Exception { - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - RocksDB db = newStore.getDb(); - - //Test with 50 invalid keys. 
- long start = System.nanoTime(); - for (int i = 0; i < 50; i++) { - Assert.assertTrue(db.get( - StringUtils.getBytesUtf16("key" + i))== null); - } - long end = System.nanoTime(); - long keyGetLatency = end - start; - - start = System.nanoTime(); - for (int i = 0; i < 50; i++) { - Assert.assertFalse(db.keyMayExist( - StringUtils.getBytesUtf16("key" + i), new StringBuilder())); - } - end = System.nanoTime(); - long keyMayExistLatency = end - start; - - Assert.assertTrue(keyMayExistLatency < keyGetLatency); - } - } - - @Test - public void testGetDBUpdatesSince() throws Exception { - - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - - try (Table firstTable = newStore.getTable(families.get(1))) { - firstTable.put(StringUtils.getBytesUtf16("Key1"), StringUtils - .getBytesUtf16("Value1")); - firstTable.put(StringUtils.getBytesUtf16("Key2"), StringUtils - .getBytesUtf16("Value2")); - } - Assert.assertTrue( - newStore.getDb().getLatestSequenceNumber() == 2); - - DBUpdatesWrapper dbUpdatesSince = newStore.getUpdatesSince(0); - Assert.assertEquals(2, dbUpdatesSince.getData().size()); - } - } - - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java deleted file mode 100644 index 788883dbbfff6..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import org.apache.hadoop.hdfs.DFSUtil; - -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; - -/** - * Tests for RocksDBTable Store. 
- */ -public class TestRDBTableStore { - private static int count = 0; - private final List families = - Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - "First", "Second", "Third", - "Fourth", "Fifth", - "Sixth", "Seventh", - "Eighth"); - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private RDBStore rdbStore = null; - private DBOptions options = null; - - private static boolean consume(Table.KeyValue keyValue) { - count++; - try { - Assert.assertNotNull(keyValue.getKey()); - } catch(IOException ex) { - Assert.fail("Unexpected Exception " + ex.toString()); - } - return true; - } - - @Before - public void setUp() throws Exception { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.ALL); - options = options.setStatistics(statistics); - - Set configSet = new HashSet<>(); - for(String name : families) { - TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions()); - configSet.add(newConfig); - } - rdbStore = new RDBStore(folder.newFolder(), options, configSet); - } - - @After - public void tearDown() throws Exception { - if (rdbStore != null) { - rdbStore.close(); - } - } - - @Test - public void toIOException() { - } - - @Test - public void getHandle() throws Exception { - try (Table testTable = rdbStore.getTable("First")) { - Assert.assertNotNull(testTable); - Assert.assertNotNull(((RDBTable) testTable).getHandle()); - } - } - - @Test - public void putGetAndEmpty() throws Exception { - try (Table testTable = rdbStore.getTable("First")) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - testTable.put(key, value); - Assert.assertFalse(testTable.isEmpty()); - byte[] readValue = testTable.get(key); - Assert.assertArrayEquals(value, readValue); - } - try (Table secondTable = rdbStore.getTable("Second")) { - Assert.assertTrue(secondTable.isEmpty()); - } - } - - @Test - public void delete() throws Exception { - List deletedKeys = new ArrayList<>(); - List validKeys = new ArrayList<>(); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - for (int x = 0; x < 100; x++) { - deletedKeys.add( - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8)); - } - - for (int x = 0; x < 100; x++) { - validKeys.add( - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8)); - } - - // Write all the keys and delete the keys scheduled for delete. - //Assert we find only expected keys in the Table. 
- try (Table testTable = rdbStore.getTable("Fourth")) { - for (int x = 0; x < deletedKeys.size(); x++) { - testTable.put(deletedKeys.get(x), value); - testTable.delete(deletedKeys.get(x)); - } - - for (int x = 0; x < validKeys.size(); x++) { - testTable.put(validKeys.get(x), value); - } - - for (int x = 0; x < validKeys.size(); x++) { - Assert.assertNotNull(testTable.get(validKeys.get(0))); - } - - for (int x = 0; x < deletedKeys.size(); x++) { - Assert.assertNull(testTable.get(deletedKeys.get(0))); - } - } - } - - @Test - public void batchPut() throws Exception { - try (Table testTable = rdbStore.getTable("Fifth"); - BatchOperation batch = rdbStore.initBatchOperation()) { - //given - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - Assert.assertNull(testTable.get(key)); - - //when - testTable.putWithBatch(batch, key, value); - rdbStore.commitBatchOperation(batch); - - //then - Assert.assertNotNull(testTable.get(key)); - } - } - - @Test - public void batchDelete() throws Exception { - try (Table testTable = rdbStore.getTable("Fifth"); - BatchOperation batch = rdbStore.initBatchOperation()) { - - //given - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - testTable.put(key, value); - Assert.assertNotNull(testTable.get(key)); - - - //when - testTable.deleteWithBatch(batch, key); - rdbStore.commitBatchOperation(batch); - - //then - Assert.assertNull(testTable.get(key)); - } - } - - @Test - public void forEachAndIterator() throws Exception { - final int iterCount = 100; - try (Table testTable = rdbStore.getTable("Sixth")) { - for (int x = 0; x < iterCount; x++) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - testTable.put(key, value); - } - int localCount = 0; - try (TableIterator iter = testTable.iterator()) { - while (iter.hasNext()) { - Table.KeyValue keyValue = iter.next(); - localCount++; - } - - Assert.assertEquals(iterCount, localCount); - iter.seekToFirst(); - iter.forEachRemaining(TestRDBTableStore::consume); - Assert.assertEquals(iterCount, count); - - } - } - } - - @Test - public void testIsExist() throws Exception { - try (Table testTable = rdbStore.getTable("Seventh")) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - testTable.put(key, value); - Assert.assertTrue(testTable.isExist(key)); - - testTable.delete(key); - Assert.assertFalse(testTable.isExist(key)); - - byte[] invalidKey = - RandomStringUtils.random(5).getBytes(StandardCharsets.UTF_8); - Assert.assertFalse(testTable.isExist(invalidKey)); - } - } - - @Test - public void testCountEstimatedRowsInTable() throws Exception { - try (Table testTable = rdbStore.getTable("Eighth")) { - // Add a few keys - final int numKeys = 12345; - for (int i = 0; i < numKeys; i++) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - testTable.put(key, value); - } - long keyCount = testTable.getEstimatedKeyCount(); - // The result should be larger than zero but not exceed(?) 
numKeys - Assert.assertTrue(keyCount > 0 && keyCount <= numKeys); - } - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java deleted file mode 100644 index 9ee0d19074aa2..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java +++ /dev/null @@ -1,373 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; - -import com.google.common.base.Optional; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.utils.db.Table.KeyValue; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; - -/** - * Tests for RocksDBTable Store. 
- */ -public class TestTypedRDBTableStore { - private static int count = 0; - private final List families = - Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - "First", "Second", "Third", - "Fourth", "Fifth", - "Sixth", "Seven", "Eighth", - "Ninth"); - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private RDBStore rdbStore = null; - private DBOptions options = null; - private CodecRegistry codecRegistry; - - @Before - public void setUp() throws Exception { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.ALL); - options = options.setStatistics(statistics); - - Set configSet = new HashSet<>(); - for (String name : families) { - TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions()); - configSet.add(newConfig); - } - rdbStore = new RDBStore(folder.newFolder(), options, configSet); - - codecRegistry = new CodecRegistry(); - - } - - @After - public void tearDown() throws Exception { - if (rdbStore != null) { - rdbStore.close(); - } - } - - @Test - public void toIOException() { - } - - @Test - public void putGetAndEmpty() throws Exception { - try (Table testTable = createTypedTable( - "First")) { - String key = - RandomStringUtils.random(10); - String value = RandomStringUtils.random(10); - testTable.put(key, value); - Assert.assertFalse(testTable.isEmpty()); - String readValue = testTable.get(key); - Assert.assertEquals(value, readValue); - } - try (Table secondTable = rdbStore.getTable("Second")) { - Assert.assertTrue(secondTable.isEmpty()); - } - } - - private Table createTypedTable(String name) - throws IOException { - return new TypedTable( - rdbStore.getTable(name), - codecRegistry, - String.class, String.class); - } - - @Test - public void delete() throws Exception { - List deletedKeys = new LinkedList<>(); - List validKeys = new LinkedList<>(); - String value = - RandomStringUtils.random(10); - for (int x = 0; x < 100; x++) { - deletedKeys.add( - RandomStringUtils.random(10)); - } - - for (int x = 0; x < 100; x++) { - validKeys.add( - RandomStringUtils.random(10)); - } - - // Write all the keys and delete the keys scheduled for delete. - //Assert we find only expected keys in the Table. 
- try (Table testTable = createTypedTable( - "Fourth")) { - for (int x = 0; x < deletedKeys.size(); x++) { - testTable.put(deletedKeys.get(x), value); - testTable.delete(deletedKeys.get(x)); - } - - for (int x = 0; x < validKeys.size(); x++) { - testTable.put(validKeys.get(x), value); - } - - for (int x = 0; x < validKeys.size(); x++) { - Assert.assertNotNull(testTable.get(validKeys.get(0))); - } - - for (int x = 0; x < deletedKeys.size(); x++) { - Assert.assertNull(testTable.get(deletedKeys.get(0))); - } - } - } - - @Test - public void batchPut() throws Exception { - - try (Table testTable = createTypedTable( - "Fourth"); - BatchOperation batch = rdbStore.initBatchOperation()) { - //given - String key = - RandomStringUtils.random(10); - String value = - RandomStringUtils.random(10); - - //when - testTable.putWithBatch(batch, key, value); - rdbStore.commitBatchOperation(batch); - - //then - Assert.assertNotNull(testTable.get(key)); - } - } - - @Test - public void batchDelete() throws Exception { - try (Table testTable = createTypedTable( - "Fourth"); - BatchOperation batch = rdbStore.initBatchOperation()) { - - //given - String key = - RandomStringUtils.random(10); - String value = - RandomStringUtils.random(10); - testTable.put(key, value); - - //when - testTable.deleteWithBatch(batch, key); - rdbStore.commitBatchOperation(batch); - - //then - Assert.assertNull(testTable.get(key)); - } - } - - private static boolean consume(Table.KeyValue keyValue) { - count++; - try { - Assert.assertNotNull(keyValue.getKey()); - } catch (IOException ex) { - Assert.fail(ex.toString()); - } - return true; - } - - @Test - public void forEachAndIterator() throws Exception { - final int iterCount = 100; - try (Table testTable = createTypedTable( - "Sixth")) { - for (int x = 0; x < iterCount; x++) { - String key = - RandomStringUtils.random(10); - String value = - RandomStringUtils.random(10); - testTable.put(key, value); - } - int localCount = 0; - - try (TableIterator> iter = - testTable.iterator()) { - while (iter.hasNext()) { - Table.KeyValue keyValue = iter.next(); - localCount++; - } - - Assert.assertEquals(iterCount, localCount); - iter.seekToFirst(); - iter.forEachRemaining(TestTypedRDBTableStore::consume); - Assert.assertEquals(iterCount, count); - - } - } - } - - @Test - public void testTypedTableWithCache() throws Exception { - int iterCount = 10; - try (Table testTable = createTypedTable( - "Seven")) { - - for (int x = 0; x < iterCount; x++) { - String key = Integer.toString(x); - String value = Integer.toString(x); - testTable.addCacheEntry(new CacheKey<>(key), - new CacheValue<>(Optional.of(value), - x)); - } - - // As we have added to cache, so get should return value even if it - // does not exist in DB. - for (int x = 0; x < iterCount; x++) { - Assert.assertEquals(Integer.toString(1), - testTable.get(Integer.toString(1))); - } - - } - } - - @Test - public void testTypedTableWithCacheWithFewDeletedOperationType() - throws Exception { - int iterCount = 10; - try (Table testTable = createTypedTable( - "Seven")) { - - for (int x = 0; x < iterCount; x++) { - String key = Integer.toString(x); - String value = Integer.toString(x); - if (x % 2 == 0) { - testTable.addCacheEntry(new CacheKey<>(key), - new CacheValue<>(Optional.of(value), x)); - } else { - testTable.addCacheEntry(new CacheKey<>(key), - new CacheValue<>(Optional.absent(), - x)); - } - } - - // As we have added to cache, so get should return value even if it - // does not exist in DB. 
- for (int x = 0; x < iterCount; x++) { - if (x % 2 == 0) { - Assert.assertEquals(Integer.toString(x), - testTable.get(Integer.toString(x))); - } else { - Assert.assertNull(testTable.get(Integer.toString(x))); - } - } - - testTable.cleanupCache(5); - - GenericTestUtils.waitFor(() -> - ((TypedTable) testTable).getCache().size() == 4, - 100, 5000); - - - //Check remaining values - for (int x = 6; x < iterCount; x++) { - if (x % 2 == 0) { - Assert.assertEquals(Integer.toString(x), - testTable.get(Integer.toString(x))); - } else { - Assert.assertNull(testTable.get(Integer.toString(x))); - } - } - - - } - } - - @Test - public void testIsExist() throws Exception { - try (Table testTable = createTypedTable( - "Eighth")) { - String key = - RandomStringUtils.random(10); - String value = RandomStringUtils.random(10); - testTable.put(key, value); - Assert.assertTrue(testTable.isExist(key)); - - String invalidKey = key + RandomStringUtils.random(1); - Assert.assertFalse(testTable.isExist(invalidKey)); - - testTable.delete(key); - Assert.assertFalse(testTable.isExist(key)); - } - } - - @Test - public void testIsExistCache() throws Exception { - try (Table testTable = createTypedTable( - "Eighth")) { - String key = - RandomStringUtils.random(10); - String value = RandomStringUtils.random(10); - testTable.addCacheEntry(new CacheKey<>(key), - new CacheValue<>(Optional.of(value), 1L)); - Assert.assertTrue(testTable.isExist(key)); - - testTable.addCacheEntry(new CacheKey<>(key), - new CacheValue<>(Optional.absent(), 1L)); - Assert.assertFalse(testTable.isExist(key)); - } - } - - @Test - public void testCountEstimatedRowsInTable() throws Exception { - try (Table testTable = createTypedTable( - "Ninth")) { - // Add a few keys - final int numKeys = 12345; - for (int i = 0; i < numKeys; i++) { - String key = - RandomStringUtils.random(10); - String value = RandomStringUtils.random(10); - testTable.put(key, value); - } - long keyCount = testTable.getEstimatedKeyCount(); - // The result should be larger than zero but not exceed(?) numKeys - Assert.assertTrue(keyCount > 0 && keyCount <= numKeys); - } - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCacheImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCacheImpl.java deleted file mode 100644 index 42391297a0a61..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCacheImpl.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import java.util.Arrays; -import java.util.Collection; -import java.util.concurrent.CompletableFuture; - -import com.google.common.base.Optional; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import static org.junit.Assert.fail; - -/** - * Class tests partial table cache. - */ -@RunWith(value = Parameterized.class) -public class TestTableCacheImpl { - private TableCache, CacheValue> tableCache; - - private final TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy; - - - @Parameterized.Parameters - public static Collection policy() { - Object[][] params = new Object[][] { - {TableCacheImpl.CacheCleanupPolicy.NEVER}, - {TableCacheImpl.CacheCleanupPolicy.MANUAL} - }; - return Arrays.asList(params); - } - - public TestTableCacheImpl( - TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy) { - this.cacheCleanupPolicy = cacheCleanupPolicy; - } - - - @Before - public void create() { - tableCache = - new TableCacheImpl<>(cacheCleanupPolicy); - } - @Test - public void testPartialTableCache() { - - - for (int i = 0; i< 10; i++) { - tableCache.put(new CacheKey<>(Integer.toString(i)), - new CacheValue<>(Optional.of(Integer.toString(i)), i)); - } - - - for (int i=0; i < 10; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); - } - - // On a full table cache if some one calls cleanup it is a no-op. - tableCache.cleanup(4); - - for (int i=5; i < 10; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); - } - } - - - @Test - public void testPartialTableCacheParallel() throws Exception { - - int totalCount = 0; - CompletableFuture future = - CompletableFuture.supplyAsync(() -> { - try { - return writeToCache(10, 1, 0); - } catch (InterruptedException ex) { - fail("writeToCache got interrupt exception"); - } - return 0; - }); - int value = future.get(); - Assert.assertEquals(10, value); - - totalCount += value; - - future = - CompletableFuture.supplyAsync(() -> { - try { - return writeToCache(10, 11, 100); - } catch (InterruptedException ex) { - fail("writeToCache got interrupt exception"); - } - return 0; - }); - - // Check we have first 10 entries in cache. - for (int i=1; i <= 10; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); - } - - - value = future.get(); - Assert.assertEquals(10, value); - - totalCount += value; - - if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.MANUAL) { - int deleted = 5; - - // cleanup first 5 entires - tableCache.cleanup(deleted); - - // We should totalCount - deleted entries in cache. - final int tc = totalCount; - GenericTestUtils.waitFor(() -> (tc - deleted == tableCache.size()), 100, - 5000); - // Check if we have remaining entries. - for (int i=6; i <= totalCount; i++) { - Assert.assertEquals(Integer.toString(i), tableCache.get( - new CacheKey<>(Integer.toString(i))).getCacheValue()); - } - tableCache.cleanup(10); - - tableCache.cleanup(totalCount); - - // Cleaned up all entries, so cache size should be zero. 
- GenericTestUtils.waitFor(() -> (0 == tableCache.size()), 100, - 5000); - } else { - tableCache.cleanup(totalCount); - Assert.assertEquals(totalCount, tableCache.size()); - } - - - } - - private int writeToCache(int count, int startVal, long sleep) - throws InterruptedException { - int counter = 1; - while (counter <= count){ - tableCache.put(new CacheKey<>(Integer.toString(startVal)), - new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal)); - startVal++; - counter++; - Thread.sleep(sleep); - } - return count; - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java deleted file mode 100644 index f97fda2d81b95..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for the DB Cache Utilities. - */ -package org.apache.hadoop.hdds.utils.db.cache; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java deleted file mode 100644 index f1c7ce139a892..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for the DB Utilities. 
- */ -package org.apache.hadoop.hdds.utils.db; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java deleted file mode 100644 index f93e3fd68d2f0..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * DB test Utils. - */ -package org.apache.hadoop.hdds.utils; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java deleted file mode 100644 index 789560a2c3a6e..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -/** - * Enum to define Dummy AuditAction Type for test. - */ -public enum DummyAction implements AuditAction { - - CREATE_VOLUME, - CREATE_BUCKET, - READ_VOLUME, - READ_BUCKET, - READ_KEY, - UPDATE_VOLUME, - UPDATE_BUCKET, - UPDATE_KEY, - DELETE_VOLUME, - DELETE_BUCKET, - DELETE_KEY, - SET_OWNER, - SET_QUOTA; - - @Override - public String getAction() { - return this.toString(); - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java deleted file mode 100644 index 0c2d98fab295f..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; - -import java.util.HashMap; -import java.util.Map; - -/** - * DummyEntity that implements Auditable for test purpose. - */ -public class DummyEntity implements Auditable { - - private String key1; - private String key2; - - public DummyEntity(){ - this.key1 = "value1"; - this.key2 = "value2"; - } - public String getKey1() { - return key1; - } - - public void setKey1(String key1) { - this.key1 = key1; - } - - public String getKey2() { - return key2; - } - - public void setKey2(String key2) { - this.key2 = key2; - } - - @Override - public Map toAuditMap() { - Map auditMap = new HashMap<>(); - auditMap.put("key1", this.key1); - auditMap.put("key2", this.key2); - return auditMap; - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java deleted file mode 100644 index 518ddaedcf75f..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java +++ /dev/null @@ -1,166 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.audit; - -import org.apache.commons.io.FileUtils; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertTrue; - -/** - * Test Ozone Audit Logger. 
- */ -public class TestOzoneAuditLogger { - - private static final Logger LOG = - LoggerFactory.getLogger(TestOzoneAuditLogger.class.getName()); - - private static final AuditLogger AUDIT = - new AuditLogger(AuditLoggerType.OMLOGGER); - - private static final String SUCCESS = AuditEventStatus.SUCCESS.name(); - private static final String FAILURE = AuditEventStatus.FAILURE.name(); - - private static final Map PARAMS = - new DummyEntity().toAuditMap(); - - private static final AuditMessage WRITE_FAIL_MSG = - new AuditMessage.Builder() - .setUser("john") - .atIp("192.168.0.1") - .forOperation(DummyAction.CREATE_VOLUME.name()) - .withParams(PARAMS) - .withResult(FAILURE) - .withException(null).build(); - - private static final AuditMessage WRITE_SUCCESS_MSG = - new AuditMessage.Builder() - .setUser("john") - .atIp("192.168.0.1") - .forOperation(DummyAction.CREATE_VOLUME.name()) - .withParams(PARAMS) - .withResult(SUCCESS) - .withException(null).build(); - - private static final AuditMessage READ_FAIL_MSG = - new AuditMessage.Builder() - .setUser("john") - .atIp("192.168.0.1") - .forOperation(DummyAction.READ_VOLUME.name()) - .withParams(PARAMS) - .withResult(FAILURE) - .withException(null).build(); - - private static final AuditMessage READ_SUCCESS_MSG = - new AuditMessage.Builder() - .setUser("john") - .atIp("192.168.0.1") - .forOperation(DummyAction.READ_VOLUME.name()) - .withParams(PARAMS) - .withResult(SUCCESS) - .withException(null).build(); - - @BeforeClass - public static void setUp(){ - System.setProperty("log4j.configurationFile", "log4j2.properties"); - } - - @AfterClass - public static void tearDown() { - File file = new File("audit.log"); - if (FileUtils.deleteQuietly(file)) { - LOG.info(file.getName() + - " has been deleted as all tests have completed."); - } else { - LOG.info("audit.log could not be deleted."); - } - } - - /** - * Test to verify default log level is INFO when logging success events. - */ - @Test - public void verifyDefaultLogLevelForSuccess() throws IOException { - AUDIT.logWriteSuccess(WRITE_SUCCESS_MSG); - String expected = - "INFO | OMAudit | " + WRITE_SUCCESS_MSG.getFormattedMessage(); - verifyLog(expected); - } - - /** - * Test to verify default log level is ERROR when logging failure events. - */ - @Test - public void verifyDefaultLogLevelForFailure() throws IOException { - AUDIT.logWriteFailure(WRITE_FAIL_MSG); - String expected = - "ERROR | OMAudit | " + WRITE_FAIL_MSG.getFormattedMessage(); - verifyLog(expected); - } - - /** - * Test to verify no READ event is logged. 
- */ - @Test - public void notLogReadEvents() throws IOException { - AUDIT.logReadSuccess(READ_SUCCESS_MSG); - AUDIT.logReadFailure(READ_FAIL_MSG); - verifyNoLog(); - } - - private void verifyLog(String expected) throws IOException { - File file = new File("audit.log"); - List lines = FileUtils.readLines(file, (String)null); - final int retry = 5; - int i = 0; - while (lines.isEmpty() && i < retry) { - lines = FileUtils.readLines(file, (String)null); - try { - Thread.sleep(500 * (i + 1)); - } catch(InterruptedException ie) { - Thread.currentThread().interrupt(); - break; - } - i++; - } - - // When log entry is expected, the log file will contain one line and - // that must be equal to the expected string - assertTrue(lines.size() != 0); - assertTrue(expected.equalsIgnoreCase(lines.get(0))); - //empty the file - lines.clear(); - FileUtils.writeLines(file, lines, false); - } - - private void verifyNoLog() throws IOException { - File file = new File("audit.log"); - List lines = FileUtils.readLines(file, (String)null); - // When no log entry is expected, the log file must be empty - assertTrue(lines.size() == 0); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java deleted file mode 100644 index 1222ad04e0820..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.audit; -/** - * Unit tests of Ozone Audit Logger. - * For test purpose, the log4j2 configuration is loaded from file at: - * src/test/resources/log4j2.properties - */ diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java deleted file mode 100644 index 819c29fd61054..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.junit.Assert; -import org.junit.Test; - -/** - * Tests for {@link Checksum} class. - */ -public class TestChecksum { - - private static final int BYTES_PER_CHECKSUM = 10; - private static final ContainerProtos.ChecksumType CHECKSUM_TYPE_DEFAULT = - ContainerProtos.ChecksumType.SHA256; - - private Checksum getChecksum(ContainerProtos.ChecksumType type) { - if (type == null) { - type = CHECKSUM_TYPE_DEFAULT; - } - return new Checksum(type, BYTES_PER_CHECKSUM); - } - - /** - * Tests {@link Checksum#verifyChecksum(byte[], ChecksumData)}. - */ - @Test - public void testVerifyChecksum() throws Exception { - Checksum checksum = getChecksum(null); - int dataLen = 55; - byte[] data = RandomStringUtils.randomAlphabetic(dataLen).getBytes(); - - ChecksumData checksumData = checksum.computeChecksum(data); - - // A checksum is calculate for each bytesPerChecksum number of bytes in - // the data. Since that value is 10 here and the data length is 55, we - // should have 6 checksums in checksumData. - Assert.assertEquals(6, checksumData.getChecksums().size()); - - // Checksum verification should pass - Assert.assertTrue("Checksum mismatch", - Checksum.verifyChecksum(data, checksumData)); - } - - /** - * Tests that if data is modified, then the checksums should not match. - */ - @Test - public void testIncorrectChecksum() throws Exception { - Checksum checksum = getChecksum(null); - byte[] data = RandomStringUtils.randomAlphabetic(55).getBytes(); - ChecksumData originalChecksumData = checksum.computeChecksum(data); - - // Change the data and check if new checksum matches the original checksum. - // Modifying one byte of data should be enough for the checksum data to - // mismatch - data[50] = (byte) (data[50]+1); - ChecksumData newChecksumData = checksum.computeChecksum(data); - Assert.assertNotEquals("Checksums should not match for different data", - originalChecksumData, newChecksumData); - } - - /** - * Tests that checksum calculated using two different checksumTypes should - * not match. 
- */ - @Test - public void testChecksumMismatchForDifferentChecksumTypes() throws Exception { - byte[] data = RandomStringUtils.randomAlphabetic(55).getBytes(); - - // Checksum1 of type SHA-256 - Checksum checksum1 = getChecksum(null); - ChecksumData checksumData1 = checksum1.computeChecksum(data); - - // Checksum2 of type CRC32 - Checksum checksum2 = getChecksum(ContainerProtos.ChecksumType.CRC32); - ChecksumData checksumData2 = checksum2.computeChecksum(data); - - // The two checksums should not match as they have different types - Assert.assertNotEquals( - "Checksums should not match for different checksum types", - checksum1, checksum2); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java deleted file mode 100644 index 2f466377b4b2c..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.util.PureJavaCrc32; -import org.apache.hadoop.util.PureJavaCrc32C; -import org.junit.Assert; -import org.junit.Test; - -import java.nio.charset.StandardCharsets; -import java.util.Random; -import java.util.zip.Checksum; - -/** - * Test {@link ChecksumByteBuffer} implementations. 
- */ -public class TestChecksumByteBuffer { - @Test - public void testPureJavaCrc32ByteBuffer() { - final Checksum expected = new PureJavaCrc32(); - final ChecksumByteBuffer testee = new PureJavaCrc32ByteBuffer(); - new VerifyChecksumByteBuffer(expected, testee).testCorrectness(); - } - - @Test - public void testPureJavaCrc32CByteBuffer() { - final Checksum expected = new PureJavaCrc32C(); - final ChecksumByteBuffer testee = new PureJavaCrc32CByteBuffer(); - new VerifyChecksumByteBuffer(expected, testee).testCorrectness(); - } - - static class VerifyChecksumByteBuffer { - private final Checksum expected; - private final ChecksumByteBuffer testee; - - VerifyChecksumByteBuffer(Checksum expected, ChecksumByteBuffer testee) { - this.expected = expected; - this.testee = testee; - } - - void testCorrectness() { - checkSame(); - - checkBytes("hello world!".getBytes(StandardCharsets.UTF_8)); - - final Random random = new Random(); - final byte[] bytes = new byte[1 << 10]; - for (int i = 0; i < 1000; i++) { - random.nextBytes(bytes); - checkBytes(bytes, random.nextInt(bytes.length)); - } - } - - void checkBytes(byte[] bytes) { - checkBytes(bytes, bytes.length); - } - - void checkBytes(byte[] bytes, int length) { - expected.reset(); - testee.reset(); - checkSame(); - - for (byte b : bytes) { - expected.update(b); - testee.update(b); - checkSame(); - } - - expected.reset(); - testee.reset(); - - for (int i = 0; i < length; i++) { - expected.update(bytes, 0, i); - testee.update(bytes, 0, i); - checkSame(); - } - - expected.reset(); - testee.reset(); - checkSame(); - } - - private void checkSame() { - Assert.assertEquals(expected.getValue(), testee.getValue()); - } - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java deleted file mode 100644 index c1470bb2efc98..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.common; - -import org.apache.commons.collections.SetUtils; -import org.apache.hadoop.ozone.common.statemachine - .InvalidStateTransitionException; -import org.apache.hadoop.ozone.common.statemachine.StateMachine; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.util.HashSet; -import java.util.Set; - -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CLEANUP; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CLOSED; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CREATING; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.FINAL; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.INIT; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES - .OPERATIONAL; - -/** - * This class is to test ozone common state machine. - */ -public class TestStateMachine { - - /** - * STATES used by the test state machine. - */ - public enum STATES {INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL}; - - /** - * EVENTS used by the test state machine. - */ - public enum EVENTS {ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT}; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Test - public void testStateMachineStates() throws InvalidStateTransitionException { - Set finals = new HashSet<>(); - finals.add(FINAL); - - StateMachine stateMachine = - new StateMachine<>(INIT, finals); - - stateMachine.addTransition(INIT, CREATING, EVENTS.ALLOCATE); - stateMachine.addTransition(CREATING, OPERATIONAL, EVENTS.CREATE); - stateMachine.addTransition(OPERATIONAL, OPERATIONAL, EVENTS.UPDATE); - stateMachine.addTransition(OPERATIONAL, CLEANUP, EVENTS.DELETE); - stateMachine.addTransition(OPERATIONAL, CLOSED, EVENTS.CLOSE); - stateMachine.addTransition(CREATING, CLEANUP, EVENTS.TIMEOUT); - - // Initial and Final states - Assert.assertEquals("Initial State", INIT, stateMachine.getInitialState()); - Assert.assertTrue("Final States", SetUtils.isEqualSet(finals, - stateMachine.getFinalStates())); - - // Valid state transitions - Assert.assertEquals("STATE should be OPERATIONAL after being created", - OPERATIONAL, stateMachine.getNextState(CREATING, EVENTS.CREATE)); - Assert.assertEquals("STATE should be OPERATIONAL after being updated", - OPERATIONAL, stateMachine.getNextState(OPERATIONAL, EVENTS.UPDATE)); - Assert.assertEquals("STATE should be CLEANUP after being deleted", - CLEANUP, stateMachine.getNextState(OPERATIONAL, EVENTS.DELETE)); - Assert.assertEquals("STATE should be CLEANUP after being timeout", - CLEANUP, stateMachine.getNextState(CREATING, EVENTS.TIMEOUT)); - Assert.assertEquals("STATE should be CLOSED after being closed", - CLOSED, stateMachine.getNextState(OPERATIONAL, EVENTS.CLOSE)); - - // Negative cases: invalid transition - expectException(); - stateMachine.getNextState(OPERATIONAL, EVENTS.CREATE); - - expectException(); - stateMachine.getNextState(CREATING, EVENTS.CLOSE); - } - - /** - * We expect an InvalidStateTransitionException. 
- */ - private void expectException() { - exception.expect(InvalidStateTransitionException.class); - exception.expectMessage("Invalid event"); - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java deleted file mode 100644 index 38878334f578e..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java +++ /dev/null @@ -1,388 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * A generic lease management API which can be used if a service - * needs any kind of lease management. - */ - -package org.apache.hadoop.ozone.lease; - -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.util.HashMap; -import java.util.Map; - -/** - * Test class to check functionality and consistency of LeaseManager. - */ -public class TestLeaseManager { - - @Rule - public ExpectedException exception = ExpectedException.none(); - - /** - * Dummy resource on which leases can be acquired. - */ - private static final class DummyResource { - - private final String name; - - private DummyResource(String name) { - this.name = name; - } - - @Override - public int hashCode() { - return name.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if(obj instanceof DummyResource) { - return name.equals(((DummyResource) obj).name); - } - return false; - } - - /** - * Adding to String method to fix the ErrorProne warning that this method - * is later used in String functions, which would print out (e.g. - * `org.apache.hadoop.ozone.lease.TestLeaseManager.DummyResource@ - * 4488aabb`) instead of useful information. - * - * @return Name of the Dummy object. - */ - @Override - public String toString() { - return "DummyResource{" + - "name='" + name + '\'' + - '}'; - } - } - - @Test - public void testLeaseAcquireAndRelease() throws LeaseException { - //It is assumed that the test case execution won't take more than 5 seconds, - //if it takes more time increase the defaultTimeout value of LeaseManager. 
- LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - DummyResource resourceTwo = new DummyResource("two"); - DummyResource resourceThree = new DummyResource("three"); - Lease leaseOne = manager.acquire(resourceOne); - Lease leaseTwo = manager.acquire(resourceTwo); - Lease leaseThree = manager.acquire(resourceThree); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertEquals(leaseTwo, manager.get(resourceTwo)); - Assert.assertEquals(leaseThree, manager.get(resourceThree)); - Assert.assertFalse(leaseOne.hasExpired()); - Assert.assertFalse(leaseTwo.hasExpired()); - Assert.assertFalse(leaseThree.hasExpired()); - //The below releases should not throw LeaseNotFoundException. - manager.release(resourceOne); - manager.release(resourceTwo); - manager.release(resourceThree); - Assert.assertTrue(leaseOne.hasExpired()); - Assert.assertTrue(leaseTwo.hasExpired()); - Assert.assertTrue(leaseThree.hasExpired()); - manager.shutdown(); - } - - @Test - public void testLeaseAlreadyExist() throws LeaseException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - DummyResource resourceTwo = new DummyResource("two"); - Lease leaseOne = manager.acquire(resourceOne); - Lease leaseTwo = manager.acquire(resourceTwo); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertEquals(leaseTwo, manager.get(resourceTwo)); - - exception.expect(LeaseAlreadyExistException.class); - exception.expectMessage("Resource: " + resourceOne); - manager.acquire(resourceOne); - - manager.release(resourceOne); - manager.release(resourceTwo); - manager.shutdown(); - } - - @Test - public void testLeaseNotFound() throws LeaseException, InterruptedException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - DummyResource resourceTwo = new DummyResource("two"); - DummyResource resourceThree = new DummyResource("three"); - - //Case 1: lease was never acquired. - exception.expect(LeaseNotFoundException.class); - exception.expectMessage("Resource: " + resourceOne); - manager.get(resourceOne); - - //Case 2: lease is acquired and released. - Lease leaseTwo = manager.acquire(resourceTwo); - Assert.assertEquals(leaseTwo, manager.get(resourceTwo)); - Assert.assertFalse(leaseTwo.hasExpired()); - manager.release(resourceTwo); - Assert.assertTrue(leaseTwo.hasExpired()); - exception.expect(LeaseNotFoundException.class); - exception.expectMessage("Resource: " + resourceTwo); - manager.get(resourceTwo); - - //Case 3: lease acquired and timed out. - Lease leaseThree = manager.acquire(resourceThree); - Assert.assertEquals(leaseThree, manager.get(resourceThree)); - Assert.assertFalse(leaseThree.hasExpired()); - long sleepTime = leaseThree.getRemainingTime() + 1000; - try { - Thread.sleep(sleepTime); - } catch (InterruptedException ex) { - //even in case of interrupt we have to wait till lease times out. 
- Thread.sleep(sleepTime); - } - Assert.assertTrue(leaseThree.hasExpired()); - exception.expect(LeaseNotFoundException.class); - exception.expectMessage("Resource: " + resourceThree); - manager.get(resourceThree); - manager.shutdown(); - } - - @Test - public void testCustomLeaseTimeout() throws LeaseException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - DummyResource resourceTwo = new DummyResource("two"); - DummyResource resourceThree = new DummyResource("three"); - Lease leaseOne = manager.acquire(resourceOne); - Lease leaseTwo = manager.acquire(resourceTwo, 10000); - Lease leaseThree = manager.acquire(resourceThree, 50000); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertEquals(leaseTwo, manager.get(resourceTwo)); - Assert.assertEquals(leaseThree, manager.get(resourceThree)); - Assert.assertFalse(leaseOne.hasExpired()); - Assert.assertFalse(leaseTwo.hasExpired()); - Assert.assertFalse(leaseThree.hasExpired()); - Assert.assertEquals(5000, leaseOne.getLeaseLifeTime()); - Assert.assertEquals(10000, leaseTwo.getLeaseLifeTime()); - Assert.assertEquals(50000, leaseThree.getLeaseLifeTime()); - // Releasing of leases is done in shutdown, so don't have to worry about - // lease release - manager.shutdown(); - } - - @Test - public void testLeaseCallback() throws LeaseException, InterruptedException { - Map leaseStatus = new HashMap<>(); - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - Lease leaseOne = manager.acquire(resourceOne); - leaseStatus.put(resourceOne, "lease in use"); - leaseOne.registerCallBack(() -> { - leaseStatus.put(resourceOne, "lease expired"); - return null; - }); - // wait for lease to expire - long sleepTime = leaseOne.getRemainingTime() + 1000; - try { - Thread.sleep(sleepTime); - } catch (InterruptedException ex) { - //even in case of interrupt we have to wait till lease times out. 
- Thread.sleep(sleepTime); - } - Assert.assertTrue(leaseOne.hasExpired()); - exception.expect(LeaseNotFoundException.class); - exception.expectMessage("Resource: " + resourceOne); - manager.get(resourceOne); - // check if callback has been executed - Assert.assertEquals("lease expired", leaseStatus.get(resourceOne)); - } - - @Test - public void testCallbackExecutionInCaseOfLeaseRelease() - throws LeaseException, InterruptedException { - // Callbacks should not be executed in case of lease release - Map leaseStatus = new HashMap<>(); - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - Lease leaseOne = manager.acquire(resourceOne); - leaseStatus.put(resourceOne, "lease in use"); - leaseOne.registerCallBack(() -> { - leaseStatus.put(resourceOne, "lease expired"); - return null; - }); - leaseStatus.put(resourceOne, "lease released"); - manager.release(resourceOne); - Assert.assertTrue(leaseOne.hasExpired()); - exception.expect(LeaseNotFoundException.class); - exception.expectMessage("Resource: " + resourceOne); - manager.get(resourceOne); - Assert.assertEquals("lease released", leaseStatus.get(resourceOne)); - } - - @Test - public void testLeaseCallbackWithMultipleLeases() - throws LeaseException, InterruptedException { - Map leaseStatus = new HashMap<>(); - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - DummyResource resourceTwo = new DummyResource("two"); - DummyResource resourceThree = new DummyResource("three"); - DummyResource resourceFour = new DummyResource("four"); - DummyResource resourceFive = new DummyResource("five"); - Lease leaseOne = manager.acquire(resourceOne); - Lease leaseTwo = manager.acquire(resourceTwo); - Lease leaseThree = manager.acquire(resourceThree); - Lease leaseFour = manager.acquire(resourceFour); - Lease leaseFive = manager.acquire(resourceFive); - leaseStatus.put(resourceOne, "lease in use"); - leaseStatus.put(resourceTwo, "lease in use"); - leaseStatus.put(resourceThree, "lease in use"); - leaseStatus.put(resourceFour, "lease in use"); - leaseStatus.put(resourceFive, "lease in use"); - leaseOne.registerCallBack(() -> { - leaseStatus.put(resourceOne, "lease expired"); - return null; - }); - leaseTwo.registerCallBack(() -> { - leaseStatus.put(resourceTwo, "lease expired"); - return null; - }); - leaseThree.registerCallBack(() -> { - leaseStatus.put(resourceThree, "lease expired"); - return null; - }); - leaseFour.registerCallBack(() -> { - leaseStatus.put(resourceFour, "lease expired"); - return null; - }); - leaseFive.registerCallBack(() -> { - leaseStatus.put(resourceFive, "lease expired"); - return null; - }); - - // release lease one, two and three - leaseStatus.put(resourceOne, "lease released"); - manager.release(resourceOne); - leaseStatus.put(resourceTwo, "lease released"); - manager.release(resourceTwo); - leaseStatus.put(resourceThree, "lease released"); - manager.release(resourceThree); - - // wait for other leases to expire - long sleepTime = leaseFive.getRemainingTime() + 1000; - - try { - Thread.sleep(sleepTime); - } catch (InterruptedException ex) { - //even in case of interrupt we have to wait till lease times out. 
- Thread.sleep(sleepTime); - } - Assert.assertTrue(leaseOne.hasExpired()); - Assert.assertTrue(leaseTwo.hasExpired()); - Assert.assertTrue(leaseThree.hasExpired()); - Assert.assertTrue(leaseFour.hasExpired()); - Assert.assertTrue(leaseFive.hasExpired()); - - Assert.assertEquals("lease released", leaseStatus.get(resourceOne)); - Assert.assertEquals("lease released", leaseStatus.get(resourceTwo)); - Assert.assertEquals("lease released", leaseStatus.get(resourceThree)); - Assert.assertEquals("lease expired", leaseStatus.get(resourceFour)); - Assert.assertEquals("lease expired", leaseStatus.get(resourceFive)); - manager.shutdown(); - } - - @Test - public void testReuseReleasedLease() throws LeaseException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - Lease leaseOne = manager.acquire(resourceOne); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertFalse(leaseOne.hasExpired()); - - manager.release(resourceOne); - Assert.assertTrue(leaseOne.hasExpired()); - - Lease sameResourceLease = manager.acquire(resourceOne); - Assert.assertEquals(sameResourceLease, manager.get(resourceOne)); - Assert.assertFalse(sameResourceLease.hasExpired()); - - manager.release(resourceOne); - Assert.assertTrue(sameResourceLease.hasExpired()); - manager.shutdown(); - } - - @Test - public void testReuseTimedOutLease() - throws LeaseException, InterruptedException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - Lease leaseOne = manager.acquire(resourceOne); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertFalse(leaseOne.hasExpired()); - // wait for lease to expire - long sleepTime = leaseOne.getRemainingTime() + 1000; - try { - Thread.sleep(sleepTime); - } catch (InterruptedException ex) { - //even in case of interrupt we have to wait till lease times out. - Thread.sleep(sleepTime); - } - Assert.assertTrue(leaseOne.hasExpired()); - - Lease sameResourceLease = manager.acquire(resourceOne); - Assert.assertEquals(sameResourceLease, manager.get(resourceOne)); - Assert.assertFalse(sameResourceLease.hasExpired()); - - manager.release(resourceOne); - Assert.assertTrue(sameResourceLease.hasExpired()); - manager.shutdown(); - } - - @Test - public void testRenewLease() throws LeaseException, InterruptedException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - Lease leaseOne = manager.acquire(resourceOne); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertFalse(leaseOne.hasExpired()); - - // add 5 more seconds to the lease - leaseOne.renew(5000); - - Thread.sleep(5000); - - // lease should still be active - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertFalse(leaseOne.hasExpired()); - manager.release(resourceOne); - manager.shutdown(); - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java deleted file mode 100644 index 1071309c730f8..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
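Outside of the deleted tests, the LeaseManager API they exercise (acquire/get/release, registerCallBack, renew, start/shutdown) can be summarized in a short sketch. The sketch below is hypothetical and only illustrative: the class name and resource strings are invented, LeaseManager&lt;String&gt; assumes the generic type parameters that the rendering above appears to have dropped, and it presumes the hadoop-hdds-common artifact being removed here is still on the classpath.

    import org.apache.hadoop.ozone.lease.Lease;
    import org.apache.hadoop.ozone.lease.LeaseManager;

    public class LeaseManagerSketch {
      public static void main(String[] args) throws Exception {
        // Same 5000 ms default timeout the deleted tests use.
        LeaseManager<String> manager = new LeaseManager<>("demo", 5000);
        manager.start();

        Lease<String> lease = manager.acquire("resource-1");
        lease.registerCallBack(() -> {
          // Runs only if the lease times out before it is released.
          System.out.println("lease on resource-1 expired");
          return null;
        });

        lease.renew(5000);             // extend the lease by 5 more seconds
        manager.release("resource-1"); // released in time, callback is skipped
        manager.shutdown();
      }
    }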
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; -/* - This package contains lease management unit test classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java deleted file mode 100644 index e88b1bb121b62..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.Assert; -import org.junit.Test; - -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Test-cases to test LockManager. - */ -public class TestLockManager { - - @Test(timeout = 1000) - public void testWriteLockWithDifferentResource() { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - manager.writeLock("/resourceOne"); - // This should work, as they are different resource. - manager.writeLock("/resourceTwo"); - manager.writeUnlock("/resourceOne"); - manager.writeUnlock("/resourceTwo"); - Assert.assertTrue(true); - } - - @Test - public void testWriteLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.writeLock("/resourceOne"); - new Thread(() -> { - manager.writeLock("/resourceOne"); - gotLock.set(true); - manager.writeUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get write lock on same object, - // it will wait. - Assert.assertFalse(gotLock.get()); - manager.writeUnlock("/resourceOne"); - // Since we have released the write lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - Assert.assertTrue(gotLock.get()); - } - - @Test(timeout = 1000) - public void testReadLockWithDifferentResource() { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - manager.readLock("/resourceOne"); - manager.readLock("/resourceTwo"); - manager.readUnlock("/resourceOne"); - manager.readUnlock("/resourceTwo"); - Assert.assertTrue(true); - } - - @Test - public void testReadLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.readLock("/resourceOne"); - new Thread(() -> { - manager.readLock("/resourceOne"); - gotLock.set(true); - manager.readUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the new thread is trying to get read lock, it should work. - Assert.assertTrue(gotLock.get()); - manager.readUnlock("/resourceOne"); - } - - @Test - public void testWriteReadLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.writeLock("/resourceOne"); - new Thread(() -> { - manager.readLock("/resourceOne"); - gotLock.set(true); - manager.readUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get read lock on same object, - // it will wait. 
- Assert.assertFalse(gotLock.get()); - manager.writeUnlock("/resourceOne"); - // Since we have released the write lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - Assert.assertTrue(gotLock.get()); - } - - @Test - public void testReadWriteLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.readLock("/resourceOne"); - new Thread(() -> { - manager.writeLock("/resourceOne"); - gotLock.set(true); - manager.writeUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get write lock on same object, - // it will wait. - Assert.assertFalse(gotLock.get()); - manager.readUnlock("/resourceOne"); - // Since we have released the read lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - Assert.assertTrue(gotLock.get()); - } - - @Test - public void testMultiReadWriteLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.readLock("/resourceOne"); - manager.readLock("/resourceOne"); - new Thread(() -> { - manager.writeLock("/resourceOne"); - gotLock.set(true); - manager.writeUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get write lock on same object, - // it will wait. - Assert.assertFalse(gotLock.get()); - manager.readUnlock("/resourceOne"); - //We have only released one read lock, we still hold another read lock. - Thread.sleep(100); - Assert.assertFalse(gotLock.get()); - manager.readUnlock("/resourceOne"); - // Since we have released the read lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - Assert.assertTrue(gotLock.get()); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java deleted file mode 100644 index a96bc16248cc4..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; -/* - This package contains the lock related test classes. 
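For reference, the locking contract that the deleted TestLockManager cases above cover (exclusive write locks, shared read locks, keyed by an arbitrary resource name) boils down to the following hypothetical sketch. The wrapper class and resource path are invented, and LockManager&lt;String&gt; assumes the generic parameter that the rendering above seems to have dropped; only the lock/unlock calls themselves mirror the removed code.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.lock.LockManager;

    public class LockManagerSketch {
      public static void main(String[] args) {
        // One manager guards many named resources; locks are created on demand.
        LockManager<String> manager = new LockManager<>(new OzoneConfiguration());

        manager.writeLock("/volume/bucket");
        try {
          // exclusive section: no other reader or writer of this resource
        } finally {
          manager.writeUnlock("/volume/bucket");
        }

        manager.readLock("/volume/bucket");
        try {
          // shared section: other readers of the same resource may proceed
        } finally {
          manager.readUnlock("/volume/bucket");
        }
      }
    }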
- */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index 0030d2e9e1ce8..0000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; -/** - * Ozone related test helper classes and tests of common utils. - */ diff --git a/hadoop-hdds/common/src/test/resources/log4j2.properties b/hadoop-hdds/common/src/test/resources/log4j2.properties deleted file mode 100644 index cef69e11b0efd..0000000000000 --- a/hadoop-hdds/common/src/test/resources/log4j2.properties +++ /dev/null @@ -1,76 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with this -# work for additional information regarding copyright ownership. The ASF -# licenses this file to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -#

-# http://www.apache.org/licenses/LICENSE-2.0 -#

-# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -# -name=PropertiesConfig - -# Checks for config change periodically and reloads -monitorInterval=5 - -filter=read, write -# filter.read.onMatch = DENY avoids logging all READ events -# filter.read.onMatch = ACCEPT permits logging all READ events -# The above two settings ignore the log levels in configuration -# filter.read.onMatch = NEUTRAL permits logging of only those READ events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.read.type = MarkerFilter -filter.read.marker = READ -filter.read.onMatch = DENY -filter.read.onMismatch = NEUTRAL - -# filter.write.onMatch = DENY avoids logging all WRITE events -# filter.write.onMatch = ACCEPT permits logging all WRITE events -# The above two settings ignore the log levels in configuration -# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.write.type = MarkerFilter -filter.write.marker = WRITE -filter.write.onMatch = NEUTRAL -filter.write.onMismatch = NEUTRAL - -# Log Levels are organized from most specific to least: -# OFF (most specific, no logging) -# FATAL (most specific, little data) -# ERROR -# WARN -# INFO -# DEBUG -# TRACE (least specific, a lot of data) -# ALL (least specific, all data) - -appenders = console, audit -appender.console.type = Console -appender.console.name = STDOUT -appender.console.layout.type = PatternLayout -appender.console.layout.pattern = %-5level | %c{1} | %msg%n - -appender.audit.type = File -appender.audit.name = AUDITLOG -appender.audit.fileName=audit.log -appender.audit.layout.type=PatternLayout -appender.audit.layout.pattern= %-5level | %c{1} | %msg%n - -loggers=audit -logger.audit.type=AsyncLogger -logger.audit.name=OMAudit -logger.audit.level = INFO -logger.audit.appenderRefs = audit -logger.audit.appenderRef.file.ref = AUDITLOG - -rootLogger.level = INFO -rootLogger.appenderRefs = stdout -rootLogger.appenderRef.stdout.ref = STDOUT diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/enforce-error.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/enforce-error.xml deleted file mode 100644 index 58c5802d0a836..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/enforce-error.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - - - - 1 - InnerNode - - - - 0 - Leaf - - - - /datacenter/rack/nodegroup/node - true - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.xml deleted file mode 100644 index 25be9c2c5d705..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - 1 - - - - 1 - Root - - - rack - 1 - InnerNode - /default-rack - - - nodegroup - 1 - InnerNode - /default-nodegroup - - - - 0 - Leaf - - - - /datacenter/rack/nodegroup/node - true - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.yaml 
b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.yaml deleted file mode 100644 index d5092ad0dbc75..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- -# Cost: The cost of crossing this layer. -# The value should be positive integer or 0. This field is optional. -# When it's not defined, it's value is default "1". -cost: 1 - -# The prefix of this layer. -# If the prefix is "dc", then every name in this layer should start with "dc", -# such as "dc1", "dc2". -# Note that unlike XML schema, the prefix must be specified explicitly if the type is InnerNode. -prefix: / - -# Layer type, optional field, default value InnerNode. -# Current value range : {ROOT, INNER_NODE, LEAF_NODE} -type: ROOT - -# Layer name -defaultName: root - -# The sub layer of current layer. We use list -sublayer: - - - cost: 1 - prefix: dc - defaultName: datacenter - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: rack - defaultName: rack - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: ng - defaultName: nodegroup - type: INNER_NODE - sublayer: - - - defaultName: node - type: LEAF_NODE - prefix: node -... \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-cost.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-cost.xml deleted file mode 100644 index cf934bc01912e..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-cost.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - -1 - InnerNode - default-rack - - - - 0 - leaf - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-version.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-version.xml deleted file mode 100644 index d69aab14f13f0..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-version.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - a - - - - 1 - ROOT - - - rack - -1 - InnerNode - default-rack - - - - 0 - leaf - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/middle-leaf.yaml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/middle-leaf.yaml deleted file mode 100644 index 0a2d490d5fa94..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/middle-leaf.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- -# Cost: The cost of crossing this layer. -# The value should be positive integer or 0. This field is optional. -# When it's not defined, it's value is default "1". -cost: 1 - -# The prefix of this layer. -# If the prefix is "dc", then every name in this layer should start with "dc", -# such as "dc1", "dc2". -# Note that unlike XML schema, the prefix must be specified explicitly if the type is InnerNode. -prefix: / - -# Layer type, optional field, default value InnerNode. -# Current value range : {ROOT, INNER_NODE, LEAF_NODE} -type: ROOT - -# Layer name -defaultName: root - -# The sub layer of current layer. We use list -sublayer: - - - cost: 1 - prefix: dc - defaultName: datacenter - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: node - defaultName: rack - type: LEAF_NODE - sublayer: - - - cost: 1 - prefix: ng - defaultName: nodegroup - type: INNER_NODE - sublayer: - - - defaultName: node - type: LEAF_NODE - prefix: node -... \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-leaf.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-leaf.xml deleted file mode 100644 index a4297af4763ad..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-leaf.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - Leaf - default-rack - - - - 0 - Leaf - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.xml deleted file mode 100644 index afc78160d9eb1..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - ROOT - default-rack - - - - 0 - Leaf - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.yaml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.yaml deleted file mode 100644 index 536ed23eb6c84..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- -# Cost: The cost of crossing this layer. -# The value should be positive integer or 0. This field is optional. -# When it's not defined, it's value is default "1". -cost: 1 - -# The prefix of this layer. -# If the prefix is "dc", then every name in this layer should start with "dc", -# such as "dc1", "dc2". -# Note that unlike XML schema, the prefix must be specified explicitly if the type is InnerNode. -prefix: / - -# Layer type, optional field, default value InnerNode. -# Current value range : {ROOT, INNER_NODE, LEAF_NODE} -type: ROOT - -# Layer name -defaultName: root - -# The sub layer of current layer. We use list -sublayer: - - - cost: 1 - prefix: root - defaultName: root - type: ROOT - sublayer: - - - cost: 1 - prefix: rack - defaultName: rack - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: ng - defaultName: nodegroup - type: INNER_NODE - sublayer: - - - defaultName: node - type: LEAF_NODE - prefix: node -... \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-topology.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-topology.xml deleted file mode 100644 index a7322ca9cd088..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-topology.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /datacenter/rack/node - false - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-leaf.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-leaf.xml deleted file mode 100644 index fcc697c875f81..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-leaf.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - InnerNode - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-root.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-root.xml deleted file mode 100644 index 940696c9414e9..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-root.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - InnerNode - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-topology.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-topology.xml deleted file mode 100644 index c16e2165464ff..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-topology.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - LEAF - - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-layers-size-mismatch.xml 
b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-layers-size-mismatch.xml deleted file mode 100644 index 2c30219c18965..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-layers-size-mismatch.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /datacenter/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-with-id-reference-failure.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-with-id-reference-failure.xml deleted file mode 100644 index fac224be108d9..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-with-id-reference-failure.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /datacenter/room/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/unknown-layer-type.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/unknown-layer-type.xml deleted file mode 100644 index d228eecbed1f4..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/unknown-layer-type.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - leaves - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-1.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-1.xml deleted file mode 100644 index 221e10b5ad1e8..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-1.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /rack/datacenter/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-2.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-2.xml deleted file mode 100644 index 51e579e3e60bc..0000000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-2.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /datacenter/node/rack - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/test.db.ini b/hadoop-hdds/common/src/test/resources/test.db.ini deleted file mode 100644 index 6666cd28b2d51..0000000000000 --- a/hadoop-hdds/common/src/test/resources/test.db.ini +++ /dev/null @@ -1,145 +0,0 @@ -# This is a RocksDB option file. -# -# A typical RocksDB options file has four sections, which are -# Version section, DBOptions section, at least one CFOptions -# section, and one TableOptions section for each column family. -# The RocksDB options file in general follows the basic INI -# file format with the following extensions / modifications: -# -# * Escaped characters -# We escaped the following characters: -# - \n -- line feed - new line -# - \r -- carriage return -# - \\ -- backslash \ -# - \: -- colon symbol : -# - \# -- hash tag # -# * Comments -# We support # style comments. Comments can appear at the ending -# part of a line. -# * Statements -# A statement is of the form option_name = value. 
-# Each statement contains a '=', where extra white-spaces -# are supported. However, we don't support multi-lined statement. -# Furthermore, each line can only contain at most one statement. -# * Sections -# Sections are of the form [SecitonTitle "SectionArgument"], -# where section argument is optional. -# * List -# We use colon-separated string to represent a list. -# For instance, n1:n2:n3:n4 is a list containing four values. -# -# Below is an example of a RocksDB options file: - - -#----------------------IMPORTANT------------------------------------# -### FAKE VALUES FOR TESTING ONLY ### DO NOT USE THESE FOR PRODUCTION. -#----------------------IMPORTANT------------------------------------# -[DBOptions] - stats_dump_period_sec=600 - max_manifest_file_size=551615 - bytes_per_sync=8388608 - delayed_write_rate=2097152 - WAL_ttl_seconds=0 - WAL_size_limit_MB=0 - max_subcompactions=1 - wal_dir= - wal_bytes_per_sync=0 - db_write_buffer_size=0 - keep_log_file_num=1000 - table_cache_numshardbits=4 - max_file_opening_threads=1 - writable_file_max_buffer_size=1048576 - random_access_max_buffer_size=1048576 - use_fsync=false - max_total_wal_size=0 - max_open_files=-1 - skip_stats_update_on_db_open=false - max_background_compactions=16 - manifest_preallocation_size=4194304 - max_background_flushes=7 - is_fd_close_on_exec=true - max_log_file_size=0 - advise_random_on_open=true - create_missing_column_families=false - paranoid_checks=true - delete_obsolete_files_period_micros=21600000000 - log_file_time_to_roll=0 - compaction_readahead_size=0 - create_if_missing=false - use_adaptive_mutex=false - enable_thread_tracking=false - allow_fallocate=true - error_if_exists=false - recycle_log_file_num=0 - skip_log_error_on_recovery=false - db_log_dir= - new_table_reader_for_compaction_inputs=true - allow_mmap_reads=false - allow_mmap_writes=false - use_direct_reads=false - use_direct_writes=false - - -[CFOptions "default"] - compaction_style=kCompactionStyleLevel - compaction_filter=nullptr - num_levels=6 - table_factory=BlockBasedTable - comparator=leveldb.BytewiseComparator - max_sequential_skip_in_iterations=8 - soft_rate_limit=0.000000 - max_bytes_for_level_base=1073741824 - memtable_prefix_bloom_probes=6 - memtable_prefix_bloom_bits=0 - memtable_prefix_bloom_huge_page_tlb_size=0 - max_successive_merges=0 - arena_block_size=16777216 - min_write_buffer_number_to_merge=1 - target_file_size_multiplier=1 - source_compaction_factor=1 - max_bytes_for_level_multiplier=8 - max_bytes_for_level_multiplier_additional=2:3:5 - compaction_filter_factory=nullptr - max_write_buffer_number=8 - level0_stop_writes_trigger=20 - compression=kSnappyCompression - level0_file_num_compaction_trigger=4 - purge_redundant_kvs_while_flush=true - max_write_buffer_number_to_maintain=0 - memtable_factory=SkipListFactory - max_grandparent_overlap_factor=8 - expanded_compaction_factor=25 - hard_pending_compaction_bytes_limit=137438953472 - inplace_update_num_locks=10000 - level_compaction_dynamic_level_bytes=true - level0_slowdown_writes_trigger=12 - filter_deletes=false - verify_checksums_in_compaction=true - min_partial_merge_operands=2 - paranoid_file_checks=false - target_file_size_base=134217728 - optimize_filters_for_hits=false - merge_operator=PutOperator - compression_per_level=kNoCompression:kNoCompression:kNoCompression:kSnappyCompression:kSnappyCompression:kSnappyCompression - compaction_measure_io_stats=false - prefix_extractor=nullptr - bloom_locality=0 - write_buffer_size=134217728 - disable_auto_compactions=false - 
inplace_update_support=false - -[TableOptions/BlockBasedTable "default"] - format_version=2 - whole_key_filtering=true - no_block_cache=false - checksum=kCRC32c - filter_policy=rocksdb.BuiltinBloomFilter - block_size_deviation=10 - block_size=8192 - block_restart_interval=16 - cache_index_and_filter_blocks=false - pin_l0_filter_and_index_blocks_in_cache=false - pin_top_level_index_and_filter=false - index_type=kBinarySearch - hash_index_allow_collision=true - flush_block_policy_factory=FlushBlockBySizePolicyFactory \ No newline at end of file diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml deleted file mode 100644 index a5955498fff15..0000000000000 --- a/hadoop-hdds/config/pom.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - hadoop-hdds-config - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Config Tools - Apache Hadoop HDDS Config - jar - - - - - - - - - junit - junit - test - - - - - diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java deleted file mode 100644 index 70aa58d541756..0000000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.concurrent.TimeUnit; - -/** - * Mark field to be configurable from ozone-site.xml. - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.METHOD) -public @interface Config { - - /** - * Configuration fragment relative to the prefix defined with @ConfigGroup. - */ - String key(); - - /** - * Default value to use if not set. - */ - String defaultValue(); - - /** - * Custom description as a help. - */ - String description(); - - /** - * Type of configuration. Use AUTO to decide it based on the java type. - */ - ConfigType type() default ConfigType.AUTO; - - /** - * If type == TIME the unit should be defined with this attribute. - */ - TimeUnit timeUnit() default TimeUnit.MILLISECONDS; - - ConfigTag[] tags(); -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java deleted file mode 100644 index 9463f42909564..0000000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.transform.OutputKeys; -import javax.xml.transform.Transformer; -import javax.xml.transform.TransformerException; -import javax.xml.transform.TransformerFactory; -import javax.xml.transform.dom.DOMSource; -import javax.xml.transform.stream.StreamResult; -import java.io.InputStream; -import java.io.Writer; -import java.util.Arrays; -import java.util.stream.Collectors; - -import org.w3c.dom.Document; -import org.w3c.dom.Element; - -/** - * Simple DOM based config file writer. - *

- * This class can init/load existing ozone-default-generated.xml fragments - * and append new entries and write to the file system. - */ -public class ConfigFileAppender { - - private Document document; - - private final DocumentBuilder builder; - - public ConfigFileAppender() { - try { - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - builder = factory.newDocumentBuilder(); - } catch (Exception ex) { - throw new ConfigurationException("Can initialize new configuration", ex); - } - } - - /** - * Initialize a new ozone-site.xml structure with empty content. - */ - public void init() { - try { - document = builder.newDocument(); - document.appendChild(document.createElement("configuration")); - } catch (Exception ex) { - throw new ConfigurationException("Can initialize new configuration", ex); - } - } - - /** - * Load existing ozone-site.xml content and parse the DOM tree. - */ - public void load(InputStream stream) { - try { - document = builder.parse(stream); - } catch (Exception ex) { - throw new ConfigurationException("Can't load existing configuration", ex); - } - } - - /** - * Add configuration fragment. - */ - public void addConfig(String key, String defaultValue, String description, - ConfigTag[] tags) { - Element root = document.getDocumentElement(); - Element propertyElement = document.createElement("property"); - - addXmlElement(propertyElement, "name", key); - - addXmlElement(propertyElement, "value", defaultValue); - - addXmlElement(propertyElement, "description", description); - - String tagsAsString = Arrays.stream(tags).map(tag -> tag.name()) - .collect(Collectors.joining(", ")); - - addXmlElement(propertyElement, "tag", tagsAsString); - - root.appendChild(propertyElement); - } - - private void addXmlElement(Element parentElement, String tagValue, - String textValue) { - Element element = document.createElement(tagValue); - element.appendChild(document.createTextNode(textValue)); - parentElement.appendChild(element); - } - - /** - * Write out the XML content to a writer. - */ - public void write(Writer writer) { - try { - TransformerFactory transformerFactory = TransformerFactory.newInstance(); - Transformer transf = transformerFactory.newTransformer(); - - transf.setOutputProperty(OutputKeys.ENCODING, "UTF-8"); - transf.setOutputProperty(OutputKeys.INDENT, "yes"); - transf - .setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2"); - - transf.transform(new DOMSource(document), new StreamResult(writer)); - } catch (TransformerException e) { - throw new ConfigurationException("Can't write the configuration xml", e); - } - } -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java deleted file mode 100644 index 471b679f8452e..0000000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
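ConfigFileAppender is normally driven by the annotation processor deleted just below, but the methods shown above (init or load, addConfig, write) can also be called directly. A minimal, hypothetical sketch; the property name, value, description and output file are invented for illustration.

    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    import org.apache.hadoop.hdds.conf.ConfigFileAppender;
    import org.apache.hadoop.hdds.conf.ConfigTag;

    public class ConfigFileAppenderSketch {
      public static void main(String[] args) throws Exception {
        ConfigFileAppender appender = new ConfigFileAppender();
        // Start an empty <configuration/> document; load(InputStream) would
        // extend an existing ozone-default-generated.xml instead.
        appender.init();
        appender.addConfig("ozone.example.enabled", "false",
            "Hypothetical property, used only in this sketch.",
            new ConfigTag[] {ConfigTag.OZONE, ConfigTag.DEBUG});
        try (Writer out = new OutputStreamWriter(
            Files.newOutputStream(Paths.get("ozone-default-generated.xml")),
            StandardCharsets.UTF_8)) {
          appender.write(out);
        }
      }
    }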

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import javax.annotation.processing.AbstractProcessor; -import javax.annotation.processing.Filer; -import javax.annotation.processing.RoundEnvironment; -import javax.annotation.processing.SupportedAnnotationTypes; -import javax.lang.model.element.Element; -import javax.lang.model.element.ElementKind; -import javax.lang.model.element.TypeElement; -import javax.tools.Diagnostic.Kind; -import javax.tools.FileObject; -import javax.tools.StandardLocation; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.nio.charset.StandardCharsets; -import java.nio.file.NoSuchFileException; -import java.util.Set; - -/** - * Annotation processor to generate config fragments from Config annotations. - */ -@SupportedAnnotationTypes("org.apache.hadoop.hdds.conf.ConfigGroup") -public class ConfigFileGenerator extends AbstractProcessor { - - public static final String OUTPUT_FILE_NAME = "ozone-default-generated.xml"; - - @Override - public boolean process(Set annotations, - RoundEnvironment roundEnv) { - if (roundEnv.processingOver()) { - return false; - } - - Filer filer = processingEnv.getFiler(); - - try { - - //load existing generated config (if exists) - ConfigFileAppender appender = new ConfigFileAppender(); - try (InputStream input = filer - .getResource(StandardLocation.CLASS_OUTPUT, "", - OUTPUT_FILE_NAME).openInputStream()) { - appender.load(input); - } catch (FileNotFoundException | NoSuchFileException ex) { - appender.init(); - } - - Set annotatedElements = - roundEnv.getElementsAnnotatedWith(ConfigGroup.class); - for (Element annotatedElement : annotatedElements) { - TypeElement configGroup = (TypeElement) annotatedElement; - - //check if any of the setters are annotated with @Config - for (Element element : configGroup.getEnclosedElements()) { - if (element.getKind() == ElementKind.METHOD) { - processingEnv.getMessager() - .printMessage(Kind.WARNING, element.getSimpleName().toString()); - if (element.getSimpleName().toString().startsWith("set") - && element.getAnnotation(Config.class) != null) { - - //update the ozone-site-generated.xml - Config configAnnotation = element.getAnnotation(Config.class); - ConfigGroup configGroupAnnotation = - configGroup.getAnnotation(ConfigGroup.class); - - String key = configGroupAnnotation.prefix() + "." 
- + configAnnotation.key(); - - appender.addConfig(key, - configAnnotation.defaultValue(), - configAnnotation.description(), - configAnnotation.tags()); - } - } - - } - } - FileObject resource = filer - .createResource(StandardLocation.CLASS_OUTPUT, "", - OUTPUT_FILE_NAME); - - try (Writer writer = new OutputStreamWriter( - resource.openOutputStream(), StandardCharsets.UTF_8)) { - appender.write(writer); - } - - } catch (IOException e) { - processingEnv.getMessager().printMessage(Kind.ERROR, - "Can't generate the config file from annotation: " + e); - } - return false; - } - -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java deleted file mode 100644 index dd24ccbf0031c..0000000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Mark pojo which holds configuration variables. - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.TYPE) -public @interface ConfigGroup { - String prefix(); -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java deleted file mode 100644 index de50d2afe9eaf..0000000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -/** - * Available config tags. - *
- * Note: the values are defined in ozone-default.xml by hadoop.tags.custom. - */ -public enum ConfigTag { - OZONE, - MANAGEMENT, - SECURITY, - PERFORMANCE, - DEBUG, - CLIENT, - SERVER, - OM, - SCM, - CRITICAL, - RATIS, - CONTAINER, - REQUIRED, - REST, - STORAGE, - PIPELINE, - STANDALONE, - S3GATEWAY -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java deleted file mode 100644 index 23a81042b26a0..0000000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -/** - * Possible type of injected configuration. - *
- * AUTO means that the exact type will be identified based on the java type of - * the configuration field. - */ -public enum ConfigType { - AUTO, - STRING, - BOOLEAN, - INT, - LONG, - TIME, - SIZE -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java deleted file mode 100644 index 2e680126a0917..0000000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -/** - * Exception to throw in case of a configuration problem. - */ -public class ConfigurationException extends RuntimeException { - public ConfigurationException() { - } - - public ConfigurationException(String message) { - super(message); - } - - public ConfigurationException(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java deleted file mode 100644 index e789040d276d2..0000000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Generic configuration annotations, tools and generators. - */ -package org.apache.hadoop.hdds.conf; diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java deleted file mode 100644 index 2dd26696b2760..0000000000000 --- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import java.util.concurrent.TimeUnit; - -/** - * Example configuration to test the configuration injection. - */ -@ConfigGroup(prefix = "ozone.scm.client") -public class ConfigurationExample { - - private String clientAddress; - - private String bindHost; - - private boolean compressionEnabled; - - private int port = 1234; - - private long waitTime = 1; - - @Config(key = "address", defaultValue = "localhost", description = "Client " - + "addres (To test string injection).", tags = ConfigTag.MANAGEMENT) - public void setClientAddress(String clientAddress) { - this.clientAddress = clientAddress; - } - - @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Bind " - + "host(To test string injection).", tags = ConfigTag.MANAGEMENT) - public void setBindHost(String bindHost) { - this.bindHost = bindHost; - } - - @Config(key = "compression.enabled", defaultValue = "true", description = - "Compression enabled. (Just to test boolean flag)", tags = - ConfigTag.MANAGEMENT) - public void setCompressionEnabled(boolean compressionEnabled) { - this.compressionEnabled = compressionEnabled; - } - - @Config(key = "port", defaultValue = "1234", description = "Port number " - + "config (To test in injection)", tags = ConfigTag.MANAGEMENT) - public void setPort(int port) { - this.port = port; - } - - @Config(key = "wait", type = ConfigType.TIME, timeUnit = - TimeUnit.SECONDS, defaultValue = "30m", description = "Wait time (To " - + "test TIME config type)", tags = ConfigTag.MANAGEMENT) - public void setWaitTime(long waitTime) { - this.waitTime = waitTime; - } - - public String getClientAddress() { - return clientAddress; - } - - public String getBindHost() { - return bindHost; - } - - public boolean isCompressionEnabled() { - return compressionEnabled; - } - - public int getPort() { - return port; - } - - public long getWaitTime() { - return waitTime; - } -} diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java deleted file mode 100644 index 0edb01a02b403..0000000000000 --- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import java.io.StringWriter; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test the utility which loads/writes the config file fragments. - */ -public class TestConfigFileAppender { - - @Test - public void testInit() { - ConfigFileAppender appender = new ConfigFileAppender(); - - appender.init(); - - appender.addConfig("hadoop.scm.enabled", "true", "desc", - new ConfigTag[] {ConfigTag.OZONE, ConfigTag.SECURITY}); - - StringWriter builder = new StringWriter(); - appender.write(builder); - - Assert.assertTrue("Generated config should contain property key entry", - builder.toString().contains("hadoop.scm.enabled")); - - Assert.assertTrue("Generated config should contain tags", - builder.toString().contains("OZONE, SECURITY")); - } -} \ No newline at end of file diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java deleted file mode 100644 index e8b310d109c23..0000000000000 --- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *
- * Testing configuration tools. - */ - -/** - * Testing configuration tools. - */ -package org.apache.hadoop.hdds.conf; diff --git a/hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor b/hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor deleted file mode 100644 index f29efdab384d1..0000000000000 --- a/hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.apache.hadoop.hdds.conf.ConfigFileGenerator diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index 18128e8952859..0000000000000 --- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml deleted file mode 100644 index 0eef961733dd7..0000000000000 --- a/hadoop-hdds/container-service/pom.xml +++ /dev/null @@ -1,103 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - hadoop-hdds-container-service - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Container Service - Apache Hadoop HDDS Container Service - jar - - - - org.apache.hadoop - hadoop-hdds-common - - - org.apache.hadoop - hadoop-hdds-server-framework - - - io.dropwizard.metrics - metrics-core - - - - org.mockito - mockito-core - 2.2.0 - test - - - - org.yaml - snakeyaml - 1.16 - - - com.github.spotbugs - spotbugs - provided - - - - - - - org.apache.hadoop - hadoop-maven-plugins - - - compile-protoc - - protoc - - - ${protobuf.version} - ${protoc.path} - - - ${basedir}/../../hadoop-hdds/common/src/main/proto/ - - ${basedir}/src/main/proto - - - ${basedir}/src/main/proto - - StorageContainerDatanodeProtocol.proto - - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - - diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java deleted file mode 100644 index c1997d6c899b5..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java +++ /dev/null @@ -1,384 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm; - -import com.google.common.base.Strings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_HEARTBEAT_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdds.HddsUtils.*; -import static org.apache.hadoop.hdds.server.ServerUtils.sanitizeUserArgs; - -/** - * Hdds stateless helper functions for server side components. - */ -public final class HddsServerUtil { - - private HddsServerUtil() { - } - - private static final Logger LOG = LoggerFactory.getLogger( - HddsServerUtil.class); - - /** - * Retrieve the socket address that should be used by DataNodes to connect - * to the SCM. - * - * @param conf - * @return Target InetSocketAddress for the SCM service endpoint. - */ - public static InetSocketAddress getScmAddressForDataNodes( - Configuration conf) { - // We try the following settings in decreasing priority to retrieve the - // target host. - // - OZONE_SCM_DATANODE_ADDRESS_KEY - // - OZONE_SCM_CLIENT_ADDRESS_KEY - // - OZONE_SCM_NAMES - // - Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - - if (!host.isPresent()) { - // Fallback to Ozone SCM names. - Collection scmAddresses = getSCMAddresses(conf); - if (scmAddresses.size() > 1) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_NAMES + - " must contain a single hostname. Multiple SCM hosts are " + - "currently unsupported"); - } - host = Optional.of(scmAddresses.iterator().next().getHostName()); - } - - if (!host.isPresent()) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + - " must be defined. 
See" + - " https://wiki.apache.org/hadoop/Ozone#Configuration " - + "for details on configuring Ozone."); - } - - // If no port number is specified then we'll just try the defaultBindPort. - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY); - - InetSocketAddress addr = NetUtils.createSocketAddr(host.get() + ":" + - port.orElse(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - return addr; - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM. - * - * @param conf - * @return Target InetSocketAddress for the SCM client endpoint. - */ - public static InetSocketAddress getScmClientBindAddress( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY); - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - - return NetUtils.createSocketAddr( - host.orElse(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT) + ":" + - port.orElse(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM Block service. - * - * @param conf - * @return Target InetSocketAddress for the SCM block client endpoint. - */ - public static InetSocketAddress getScmBlockClientBindAddress( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY); - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY); - - return NetUtils.createSocketAddr( - host.orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT) - + ":" - + port.orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT)); - } - - /** - * Retrieve the socket address that should be used by scm security server to - * service clients. - * - * @param conf - * @return Target InetSocketAddress for the SCM security service. - */ - public static InetSocketAddress getScmSecurityInetAddress( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY); - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY); - - return NetUtils.createSocketAddr( - host.orElse(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT) - + ":" + port - .orElse(conf.getInt(ScmConfigKeys - .OZONE_SCM_SECURITY_SERVICE_PORT_KEY, - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT))); - } - - /** - * Retrieve the socket address that should be used by DataNodes to connect - * to the SCM. - * - * @param conf - * @return Target InetSocketAddress for the SCM service endpoint. - */ - public static InetSocketAddress getScmDataNodeBindAddress( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY); - - // If no port number is specified then we'll just try the defaultBindPort. - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY); - - return NetUtils.createSocketAddr( - host.orElse(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_DEFAULT) + ":" + - port.orElse(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - } - - - /** - * Returns the interval in which the heartbeat processor thread runs. - * - * @param conf - Configuration - * @return long in Milliseconds. 
- */ - public static long getScmheartbeatCheckerInterval(Configuration conf) { - return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - } - - /** - * Heartbeat Interval - Defines the heartbeat frequency from a datanode to - * SCM. - * - * @param conf - Ozone Config - * @return - HB interval in milli seconds. - */ - public static long getScmHeartbeatInterval(Configuration conf) { - return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL, - HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); - } - - /** - * Get the Stale Node interval, which is used by SCM to flag a datanode as - * stale, if the heartbeat from that node has been missing for this duration. - * - * @param conf - Configuration. - * @return - Long, Milliseconds to wait before flagging a node as stale. - */ - public static long getStaleNodeInterval(Configuration conf) { - - long staleNodeIntervalMs = - conf.getTimeDuration(OZONE_SCM_STALENODE_INTERVAL, - OZONE_SCM_STALENODE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); - - long heartbeatThreadFrequencyMs = getScmheartbeatCheckerInterval(conf); - - long heartbeatIntervalMs = getScmHeartbeatInterval(conf); - - - // Make sure that StaleNodeInterval is configured way above the frequency - // at which we run the heartbeat thread. - // - // Here we check that staleNodeInterval is at least five times more than the - // frequency at which the accounting thread is going to run. - staleNodeIntervalMs = sanitizeUserArgs(OZONE_SCM_STALENODE_INTERVAL, - staleNodeIntervalMs, OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - heartbeatThreadFrequencyMs, 5, 1000); - - // Make sure that stale node value is greater than configured value that - // datanodes are going to send HBs. - staleNodeIntervalMs = sanitizeUserArgs(OZONE_SCM_STALENODE_INTERVAL, - staleNodeIntervalMs, HDDS_HEARTBEAT_INTERVAL, heartbeatIntervalMs, 3, - 1000); - return staleNodeIntervalMs; - } - - /** - * Gets the interval for dead node flagging. This has to be a value that is - * greater than stale node value, and by transitive relation we also know - * that this value is greater than heartbeat interval and heartbeatProcess - * Interval. - * - * @param conf - Configuration. - * @return - the interval for dead node flagging. - */ - public static long getDeadNodeInterval(Configuration conf) { - long staleNodeIntervalMs = getStaleNodeInterval(conf); - long deadNodeIntervalMs = conf.getTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, - OZONE_SCM_DEADNODE_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - // Make sure that dead nodes Ms is at least twice the time for staleNodes - // with a max of 1000 times the staleNodes. - return sanitizeUserArgs(OZONE_SCM_DEADNODE_INTERVAL, deadNodeIntervalMs, - OZONE_SCM_STALENODE_INTERVAL, staleNodeIntervalMs, 2, 1000); - } - - /** - * Timeout value for the RPC from Datanode to SCM, primarily used for - * Heartbeats and container reports. - * - * @param conf - Ozone Config - * @return - Rpc timeout in Milliseconds. - */ - public static long getScmRpcTimeOutInMilliseconds(Configuration conf) { - return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, - OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - } - - /** - * Log Warn interval. - * - * @param conf - Ozone Config - * @return - Log warn interval. 
- */ - public static int getLogWarnInterval(Configuration conf) { - return conf.getInt(OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT, - OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT); - } - - /** - * returns the Container port. - * @param conf - Conf - * @return port number. - */ - public static int getContainerPort(Configuration conf) { - return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - } - - - /** - * Return the list of service addresses for the Ozone SCM. This method is used - * by the DataNodes to determine the service instances to connect to. - * - * @param conf - * @return list of SCM service addresses. - */ - public static Map> - getScmServiceRpcAddresses(Configuration conf) { - - final Map serviceInstances = new HashMap<>(); - serviceInstances.put(OZONE_SCM_SERVICE_INSTANCE_ID, - getScmAddressForDataNodes(conf)); - - final Map> services = - new HashMap<>(); - services.put(OZONE_SCM_SERVICE_ID, serviceInstances); - return services; - } - - public static String getOzoneDatanodeRatisDirectory(Configuration conf) { - String storageDir = conf.get( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); - - if (Strings.isNullOrEmpty(storageDir)) { - storageDir = getDefaultRatisDirectory(conf); - } - return storageDir; - } - - public static String getDefaultRatisDirectory(Configuration conf) { - LOG.warn("Storage directory for Ratis is not configured. It is a good " + - "idea to map this to an SSD disk. Falling back to {}", - HddsConfigKeys.OZONE_METADATA_DIRS); - File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf); - return (new File(metaDirPath, "ratis")).getPath(); - } - - /** - * Get the path for datanode id file. - * - * @param conf - Configuration - * @return the path of datanode id as string - */ - public static String getDatanodeIdFilePath(Configuration conf) { - String dataNodeIDDirPath = - conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR); - if (dataNodeIDDirPath == null) { - File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf); - if (metaDirPath == null) { - // this means meta data is not found, in theory should not happen at - // this point because should've failed earlier. - throw new IllegalArgumentException("Unable to locate meta data" + - "directory when getting datanode id path"); - } - dataNodeIDDirPath = metaDirPath.toString(); - } - // Use default datanode id file name for file path - return new File(dataNodeIDDirPath, - OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT).toString(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java deleted file mode 100644 index 4e520466254ee..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; - -/** - * This is a class that tracks versions of SCM. - */ -public final class VersionInfo { - - // We will just be normal and use positive counting numbers for versions. - private final static VersionInfo[] VERSION_INFOS = - {new VersionInfo("First version of SCM", 1)}; - - - public static final String DESCRIPTION_KEY = "Description"; - private final String description; - private final int version; - - /** - * Never created outside this class. - * - * @param description -- description - * @param version -- version number - */ - private VersionInfo(String description, int version) { - this.description = description; - this.version = version; - } - - /** - * Returns all versions. - * - * @return Version info array. - */ - public static VersionInfo[] getAllVersions() { - return VERSION_INFOS.clone(); - } - - /** - * Returns the latest version. - * - * @return versionInfo - */ - public static VersionInfo getLatestVersion() { - return VERSION_INFOS[VERSION_INFOS.length - 1]; - } - - /** - * Return description. - * - * @return String - */ - public String getDescription() { - return description; - } - - /** - * Return the version. - * - * @return int. - */ - public int getVersion() { - return version; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index 590546896a480..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java deleted file mode 100644 index 3dcfcfe547c76..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.server.BaseHttpServer; - -/** - * Simple http server to provide basic monitoring for hdds datanode. - *
- * This server is used to access default /conf /prom /prof endpoints. - */ -public class HddsDatanodeHttpServer extends BaseHttpServer { - - public HddsDatanodeHttpServer(Configuration conf) throws IOException { - super(conf, "hddsDatanode"); - } - - @Override - protected String getHttpAddressKey() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY; - } - - @Override - protected String getHttpBindHostKey() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_BIND_HOST_KEY; - } - - @Override - protected String getHttpsAddressKey() { - return HddsConfigKeys.HDDS_DATANODE_HTTPS_ADDRESS_KEY; - } - - @Override - protected String getHttpsBindHostKey() { - return HddsConfigKeys.HDDS_DATANODE_HTTPS_BIND_HOST_KEY; - } - - @Override - protected String getBindHostDefault() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_BIND_HOST_DEFAULT; - } - - @Override - protected int getHttpBindPortDefault() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_BIND_PORT_DEFAULT; - } - - @Override - protected int getHttpsBindPortDefault() { - return HddsConfigKeys.HDDS_DATANODE_HTTPS_BIND_PORT_DEFAULT; - } - - @Override - protected String getKeytabFile() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_KERBEROS_KEYTAB_FILE_KEY; - } - - @Override - protected String getSpnegoPrincipal() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_KERBEROS_PRINCIPAL_KEY; - } - - @Override - protected String getEnabledKey() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_ENABLED_KEY; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java deleted file mode 100644 index b13c37dd45342..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ /dev/null @@ -1,495 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.util.ServicePlugin; -import org.apache.hadoop.util.StringUtils; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; - -import java.io.File; -import java.io.IOException; -import java.net.InetAddress; -import java.security.KeyPair; -import java.security.cert.CertificateException; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getX509Certificate; -import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString; -import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY; -import static org.apache.hadoop.util.ExitUtil.terminate; - -/** - * Datanode service plugin to start the HDDS container services. 
- */ - -@Command(name = "ozone datanode", - hidden = true, description = "Start the datanode for ozone", - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class HddsDatanodeService extends GenericCli implements ServicePlugin { - - private static final Logger LOG = LoggerFactory.getLogger( - HddsDatanodeService.class); - - private OzoneConfiguration conf; - private DatanodeDetails datanodeDetails; - private DatanodeStateMachine datanodeStateMachine; - private List plugins; - private CertificateClient dnCertClient; - private String component; - private HddsDatanodeHttpServer httpServer; - private boolean printBanner; - private String[] args; - private volatile AtomicBoolean isStopped = new AtomicBoolean(false); - - public HddsDatanodeService(boolean printBanner, String[] args) { - this.printBanner = printBanner; - this.args = args != null ? Arrays.copyOf(args, args.length) : null; - } - - /** - * Create an Datanode instance based on the supplied command-line arguments. - *
- * This method is intended for unit tests only. It suppresses the - * startup/shutdown message and skips registering Unix signal handlers. - * - * @param args command line arguments. - * @return Datanode instance - */ - @VisibleForTesting - public static HddsDatanodeService createHddsDatanodeService( - String[] args) { - return createHddsDatanodeService(args, false); - } - - /** - * Create an Datanode instance based on the supplied command-line arguments. - * - * @param args command line arguments. - * @param printBanner if true, then log a verbose startup message. - * @return Datanode instance - */ - private static HddsDatanodeService createHddsDatanodeService( - String[] args, boolean printBanner) { - return new HddsDatanodeService(printBanner, args); - } - - public static void main(String[] args) { - try { - HddsDatanodeService hddsDatanodeService = - createHddsDatanodeService(args, true); - hddsDatanodeService.run(args); - } catch (Throwable e) { - LOG.error("Exception in HddsDatanodeService.", e); - terminate(1, e); - } - } - - public static Logger getLogger() { - return LOG; - } - - @Override - public Void call() throws Exception { - if (printBanner) { - StringUtils - .startupShutdownMessage(HddsDatanodeService.class, args, LOG); - } - start(createOzoneConfiguration()); - join(); - return null; - } - - public void setConfiguration(OzoneConfiguration configuration) { - this.conf = configuration; - } - - /** - * Starts HddsDatanode services. - * - * @param service The service instance invoking this method - */ - @Override - public void start(Object service) { - if (service instanceof Configurable) { - start(new OzoneConfiguration(((Configurable) service).getConf())); - } else { - start(new OzoneConfiguration()); - } - } - - public void start(OzoneConfiguration configuration) { - setConfiguration(configuration); - start(); - } - - public void start() { - OzoneConfiguration.activate(); - HddsUtils.initializeMetrics(conf, "HddsDatanode"); - if (HddsUtils.isHddsEnabled(conf)) { - try { - String hostname = HddsUtils.getHostName(conf); - String ip = InetAddress.getByName(hostname).getHostAddress(); - datanodeDetails = initializeDatanodeDetails(); - datanodeDetails.setHostName(hostname); - datanodeDetails.setIpAddress(ip); - TracingUtil.initTracing( - "HddsDatanodeService." + datanodeDetails.getUuidString() - .substring(0, 8)); - LOG.info("HddsDatanodeService host:{} ip:{}", hostname, ip); - // Authenticate Hdds Datanode service if security is enabled - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - component = "dn-" + datanodeDetails.getUuidString(); - - dnCertClient = new DNCertificateClient(new SecurityConfig(conf), - datanodeDetails.getCertSerialId()); - - if (SecurityUtil.getAuthenticationMethod(conf).equals( - UserGroupInformation.AuthenticationMethod.KERBEROS)) { - LOG.info("Ozone security is enabled. Attempting login for Hdds " + - "Datanode user. Principal: {},keytab: {}", conf.get( - DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY), - conf.get(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY)); - - UserGroupInformation.setConfiguration(conf); - - SecurityUtil.login(conf, DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, - DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hostname); - } else { - throw new AuthenticationException(SecurityUtil. - getAuthenticationMethod(conf) + " authentication method not " + - "supported. 
Datanode user" + " login " + "failed."); - } - LOG.info("Hdds Datanode login successful."); - } - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - initializeCertificateClient(conf); - } - datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf, - dnCertClient, this::terminateDatanode); - try { - httpServer = new HddsDatanodeHttpServer(conf); - httpServer.start(); - } catch (Exception ex) { - LOG.error("HttpServer failed to start.", ex); - } - startPlugins(); - // Starting HDDS Daemons - datanodeStateMachine.startDaemon(); - } catch (IOException e) { - throw new RuntimeException("Can't start the HDDS datanode plugin", e); - } catch (AuthenticationException ex) { - throw new RuntimeException("Fail to authentication when starting" + - " HDDS datanode plugin", ex); - } - } - } - - /** - * Initializes secure Datanode. - * */ - @VisibleForTesting - public void initializeCertificateClient(OzoneConfiguration config) - throws IOException { - LOG.info("Initializing secure Datanode."); - - CertificateClient.InitResponse response = dnCertClient.init(); - LOG.info("Init response: {}", response); - switch (response) { - case SUCCESS: - LOG.info("Initialization successful, case:{}.", response); - break; - case GETCERT: - getSCMSignedCert(config); - LOG.info("Successfully stored SCM signed certificate, case:{}.", - response); - break; - case FAILURE: - LOG.error("DN security initialization failed, case:{}.", response); - throw new RuntimeException("DN security initialization failed."); - case RECOVER: - LOG.error("DN security initialization failed, case:{}. OM certificate " + - "is missing.", response); - throw new RuntimeException("DN security initialization failed."); - default: - LOG.error("DN security initialization failed. Init response: {}", - response); - throw new RuntimeException("DN security initialization failed."); - } - } - - /** - * Get SCM signed certificate and store it using certificate client. - * @param config - * */ - private void getSCMSignedCert(OzoneConfiguration config) { - try { - PKCS10CertificationRequest csr = getCSR(config); - // TODO: For SCM CA we should fetch certificate from multiple SCMs. - SCMSecurityProtocolClientSideTranslatorPB secureScmClient = - HddsUtils.getScmSecurityClient(config); - SCMGetCertResponseProto response = secureScmClient. - getDataNodeCertificateChain(datanodeDetails.getProtoBufMessage(), - getEncodedString(csr)); - // Persist certificates. - if(response.hasX509CACertificate()) { - String pemEncodedCert = response.getX509Certificate(); - dnCertClient.storeCertificate(pemEncodedCert, true); - dnCertClient.storeCertificate(response.getX509CACertificate(), true, - true); - datanodeDetails.setCertSerialId(getX509Certificate(pemEncodedCert). - getSerialNumber().toString()); - persistDatanodeDetails(datanodeDetails); - } else { - throw new RuntimeException("Unable to retrieve datanode certificate " + - "chain"); - } - } catch (IOException | CertificateException e) { - LOG.error("Error while storing SCM signed certificate.", e); - throw new RuntimeException(e); - } - } - - /** - * Creates CSR for DN. 
- * @param config - * */ - @VisibleForTesting - public PKCS10CertificationRequest getCSR(Configuration config) - throws IOException { - CertificateSignRequest.Builder builder = dnCertClient.getCSRBuilder(); - KeyPair keyPair = new KeyPair(dnCertClient.getPublicKey(), - dnCertClient.getPrivateKey()); - - String hostname = InetAddress.getLocalHost().getCanonicalHostName(); - String subject = UserGroupInformation.getCurrentUser() - .getShortUserName() + "@" + hostname; - - builder.setCA(false) - .setKey(keyPair) - .setConfiguration(config) - .setSubject(subject); - - LOG.info("Creating csr for DN-> subject:{}", subject); - return builder.build(); - } - - /** - * Returns DatanodeDetails or null in case of Error. - * - * @return DatanodeDetails - */ - private DatanodeDetails initializeDatanodeDetails() - throws IOException { - String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf); - if (idFilePath == null || idFilePath.isEmpty()) { - LOG.error("A valid path is needed for config setting {}", - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR); - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR + - " must be defined. See" + - " https://wiki.apache.org/hadoop/Ozone#Configuration" + - " for details on configuring Ozone."); - } - - Preconditions.checkNotNull(idFilePath); - File idFile = new File(idFilePath); - if (idFile.exists()) { - return ContainerUtils.readDatanodeDetailsFrom(idFile); - } else { - // There is no datanode.id file, this might be the first time datanode - // is started. - String datanodeUuid = UUID.randomUUID().toString(); - return DatanodeDetails.newBuilder().setUuid(datanodeUuid).build(); - } - } - - /** - * Persist DatanodeDetails to file system. - * @param dnDetails - * - * @return DatanodeDetails - */ - private void persistDatanodeDetails(DatanodeDetails dnDetails) - throws IOException { - String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf); - if (idFilePath == null || idFilePath.isEmpty()) { - LOG.error("A valid path is needed for config setting {}", - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR); - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR + - " must be defined. See" + - " https://wiki.apache.org/hadoop/Ozone#Configuration" + - " for details on configuring Ozone."); - } - - Preconditions.checkNotNull(idFilePath); - File idFile = new File(idFilePath); - ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile); - } - - /** - * Starts all the service plugins which are configured using - * OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY. - */ - private void startPlugins() { - try { - plugins = conf.getInstances(HDDS_DATANODE_PLUGINS_KEY, - ServicePlugin.class); - } catch (RuntimeException e) { - String pluginsValue = conf.get(HDDS_DATANODE_PLUGINS_KEY); - LOG.error("Unable to load HDDS DataNode plugins. " + - "Specified list of plugins: {}", - pluginsValue, e); - throw e; - } - for (ServicePlugin plugin : plugins) { - try { - plugin.start(this); - LOG.info("Started plug-in {}", plugin); - } catch (Throwable t) { - LOG.warn("ServicePlugin {} could not be started", plugin, t); - } - } - } - - /** - * Returns the OzoneConfiguration used by this HddsDatanodeService. - * - * @return OzoneConfiguration - */ - public OzoneConfiguration getConf() { - return conf; - } - - /** - * Return DatanodeDetails if set, return null otherwise. 
- * - * @return DatanodeDetails - */ - @VisibleForTesting - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - - @VisibleForTesting - public DatanodeStateMachine getDatanodeStateMachine() { - return datanodeStateMachine; - } - - public void join() { - if (datanodeStateMachine != null) { - try { - datanodeStateMachine.join(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - LOG.info("Interrupted during StorageContainerManager join."); - } - } - } - - public void terminateDatanode() { - stop(); - terminate(1); - } - - - @Override - public void stop() { - if (!isStopped.get()) { - isStopped.set(true); - if (plugins != null) { - for (ServicePlugin plugin : plugins) { - try { - plugin.stop(); - LOG.info("Stopped plug-in {}", plugin); - } catch (Throwable t) { - LOG.warn("ServicePlugin {} could not be stopped", plugin, t); - } - } - } - if (datanodeStateMachine != null) { - datanodeStateMachine.stopDaemon(); - } - if (httpServer != null) { - try { - httpServer.stop(); - } catch (Exception e) { - LOG.error("Stopping HttpServer is failed.", e); - } - } - } - } - - @Override - public void close() { - if (plugins != null) { - for (ServicePlugin plugin : plugins) { - try { - plugin.close(); - } catch (Throwable t) { - LOG.warn("ServicePlugin {} could not be closed", plugin, t); - } - } - } - } - - @VisibleForTesting - public String getComponent() { - return component; - } - - public CertificateClient getCertificateClient() { - return dnCertClient; - } - - @VisibleForTesting - public void setCertificateClient(CertificateClient client) { - dnCertClient = client; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeStopService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeStopService.java deleted file mode 100644 index 02c1431fb3ab0..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeStopService.java +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -/** - * Interface which declares a method to stop HddsDatanodeService. - */ -public interface HddsDatanodeStopService { - - void stopService(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java deleted file mode 100644 index 2d58c39a15194..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common; - -/** - * Datanode layout version which describes information about the layout version - * on the datanode. - */ -public final class DataNodeLayoutVersion { - - // We will just be normal and use positive counting numbers for versions. - private final static DataNodeLayoutVersion[] VERSION_INFOS = - {new DataNodeLayoutVersion(1, "HDDS Datanode LayOut Version 1")}; - - private final String description; - private final int version; - - /** - * Never created outside this class. - * - * @param description -- description - * @param version -- version number - */ - private DataNodeLayoutVersion(int version, String description) { - this.description = description; - this.version = version; - } - - /** - * Returns all versions. - * - * @return Version info array. - */ - public static DataNodeLayoutVersion[] getAllVersions() { - return VERSION_INFOS.clone(); - } - - /** - * Returns the latest version. - * - * @return versionInfo - */ - public static DataNodeLayoutVersion getLatestVersion() { - return VERSION_INFOS[VERSION_INFOS.length - 1]; - } - - /** - * Return description. - * - * @return String - */ - public String getDescription() { - return description; - } - - /** - * Return the version. - * - * @return int. 
- */ - public int getVersion() { - return version; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java deleted file mode 100644 index 9ea4adf8a7250..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableQuantiles; -import org.apache.hadoop.metrics2.lib.MutableRate; - -/** - * - * This class is for maintaining the various Storage Container - * DataNode statistics and publishing them through the metrics interfaces. - * This also registers the JMX MBean for RPC. - *
<p>
- * This class has a number of metrics variables that are publicly accessible; - * these variables (objects) have methods to update their values; - * for example: - *
<p>
{@link #numOps}.inc() - * - */ -@InterfaceAudience.Private -@Metrics(about="Storage Container DataNode Metrics", context="dfs") -public class ContainerMetrics { - public static final String STORAGE_CONTAINER_METRICS = - "StorageContainerMetrics"; - @Metric private MutableCounterLong numOps; - private MutableCounterLong[] numOpsArray; - private MutableCounterLong[] opsBytesArray; - private MutableRate[] opsLatency; - private MutableQuantiles[][] opsLatQuantiles; - private MetricsRegistry registry = null; - - public ContainerMetrics(int[] intervals) { - int numEnumEntries = ContainerProtos.Type.values().length; - final int len = intervals.length; - this.numOpsArray = new MutableCounterLong[numEnumEntries]; - this.opsBytesArray = new MutableCounterLong[numEnumEntries]; - this.opsLatency = new MutableRate[numEnumEntries]; - this.opsLatQuantiles = new MutableQuantiles[numEnumEntries][len]; - this.registry = new MetricsRegistry("StorageContainerMetrics"); - for (int i = 0; i < numEnumEntries; i++) { - numOpsArray[i] = registry.newCounter( - "num" + ContainerProtos.Type.forNumber(i + 1), - "number of " + ContainerProtos.Type.forNumber(i + 1) + " ops", - (long) 0); - opsBytesArray[i] = registry.newCounter( - "bytes" + ContainerProtos.Type.forNumber(i + 1), - "bytes used by " + ContainerProtos.Type.forNumber(i + 1) + "op", - (long) 0); - opsLatency[i] = registry.newRate( - "latency" + ContainerProtos.Type.forNumber(i + 1), - ContainerProtos.Type.forNumber(i + 1) + " op"); - - for (int j = 0; j < len; j++) { - int interval = intervals[j]; - String quantileName = ContainerProtos.Type.forNumber(i + 1) + "Nanos" - + interval + "s"; - opsLatQuantiles[i][j] = registry.newQuantiles(quantileName, - "latency of Container ops", "ops", "latency", interval); - } - } - } - - public static ContainerMetrics create(Configuration conf) { - MetricsSystem ms = DefaultMetricsSystem.instance(); - // Percentile measurement is off by default, by watching no intervals - int[] intervals = - conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY); - return ms.register(STORAGE_CONTAINER_METRICS, - "Storage Container Node Metrics", - new ContainerMetrics(intervals)); - } - - public static void remove() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(STORAGE_CONTAINER_METRICS); - } - - public void incContainerOpsMetrics(ContainerProtos.Type type) { - numOps.incr(); - numOpsArray[type.ordinal()].incr(); - } - - public long getContainerOpsMetrics(ContainerProtos.Type type){ - return numOpsArray[type.ordinal()].value(); - } - - public void incContainerOpsLatencies(ContainerProtos.Type type, - long latencyNanos) { - opsLatency[type.ordinal()].add(latencyNanos); - for (MutableQuantiles q: opsLatQuantiles[type.ordinal()]) { - q.add(latencyNanos); - } - } - - public void incContainerBytesStats(ContainerProtos.Type type, long bytes) { - opsBytesArray[type.ordinal()].incr(bytes); - } - - public long getContainerBytesMetrics(ContainerProtos.Type type){ - return opsBytesArray[type.ordinal()].value(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java deleted file mode 100644 index ff6dec83cd08c..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Licensed to 
the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import static org.apache.commons.io.FilenameUtils.removeExtension; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_CHECKSUM_ERROR; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_ALGORITHM; -import static org.apache.hadoop.ozone.container.common.impl.ContainerData.CHARSET_ENCODING; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.nio.file.Paths; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; - -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.Yaml; - -import com.google.common.base.Preconditions; - -/** - * A set of helper functions to create proper responses. - */ -public final class ContainerUtils { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerUtils.class); - - private ContainerUtils() { - //never constructed. - } - - /** - * Returns a Container Command Response Builder with the specified result - * and message. - * @param request requestProto message. - * @param result result of the command. - * @param message response message. - * @return ContainerCommand Response Builder. - */ - public static ContainerCommandResponseProto.Builder - getContainerCommandResponse( - ContainerCommandRequestProto request, Result result, String message) { - return ContainerCommandResponseProto.newBuilder() - .setCmdType(request.getCmdType()) - .setTraceID(request.getTraceID()) - .setResult(result) - .setMessage(message); - } - - /** - * Returns a Container Command Response Builder. This call is used to build - * success responses. Calling function can add other fields to the response - * as required. - * @param request requestProto message. 
- * @return ContainerCommand Response Builder with result as SUCCESS. - */ - public static ContainerCommandResponseProto.Builder getSuccessResponseBuilder( - ContainerCommandRequestProto request) { - return - ContainerCommandResponseProto.newBuilder() - .setCmdType(request.getCmdType()) - .setTraceID(request.getTraceID()) - .setResult(Result.SUCCESS); - } - - /** - * Returns a Container Command Response. This call is used for creating null - * success responses. - * @param request requestProto message. - * @return ContainerCommand Response with result as SUCCESS. - */ - public static ContainerCommandResponseProto getSuccessResponse( - ContainerCommandRequestProto request) { - ContainerCommandResponseProto.Builder builder = - getContainerCommandResponse(request, Result.SUCCESS, ""); - return builder.build(); - } - - /** - * We found a command type but no associated payload for the command. Hence - * return malformed Command as response. - * - * @param request - Protobuf message. - * @return ContainerCommandResponseProto - MALFORMED_REQUEST. - */ - public static ContainerCommandResponseProto malformedRequest( - ContainerCommandRequestProto request) { - return getContainerCommandResponse(request, Result.MALFORMED_REQUEST, - "Cmd type does not match the payload.").build(); - } - - /** - * We found a command type that is not supported yet. - * - * @param request - Protobuf message. - * @return ContainerCommandResponseProto - UNSUPPORTED_REQUEST. - */ - public static ContainerCommandResponseProto unsupportedRequest( - ContainerCommandRequestProto request) { - return getContainerCommandResponse(request, Result.UNSUPPORTED_REQUEST, - "Server does not support this command yet.").build(); - } - - /** - * Logs the error and returns a response to the caller. - * - * @param log - Logger - * @param ex - Exception - * @param request - Request Object - * @return Response - */ - public static ContainerCommandResponseProto logAndReturnError( - Logger log, StorageContainerException ex, - ContainerCommandRequestProto request) { - log.info("Operation: {} : Trace ID: {} : Message: {} : Result: {}", - request.getCmdType().name(), request.getTraceID(), - ex.getMessage(), ex.getResult().getValueDescriptor().getName()); - return getContainerCommandResponse(request, ex.getResult(), ex.getMessage()) - .build(); - } - - /** - * get containerName from a container file. - * - * @param containerFile - File - * @return Name of the container. - */ - public static String getContainerNameFromFile(File containerFile) { - Preconditions.checkNotNull(containerFile); - return Paths.get(containerFile.getParent()).resolve( - removeExtension(containerFile.getName())).toString(); - } - - public static long getContainerIDFromFile(File containerFile) { - Preconditions.checkNotNull(containerFile); - String containerID = getContainerNameFromFile(containerFile); - return Long.parseLong(containerID); - } - - /** - * Verifies that this is indeed a new container. - * - * @param containerFile - Container File to verify - * @throws FileAlreadyExistsException - */ - public static void verifyIsNewContainer(File containerFile) throws - FileAlreadyExistsException { - Logger log = LoggerFactory.getLogger(ContainerSet.class); - Preconditions.checkNotNull(containerFile, "containerFile Should not be " + - "null"); - if (containerFile.getParentFile().exists()) { - log.error("Container already exists on disk. 
File: {}", containerFile - .toPath()); - throw new FileAlreadyExistsException("container already exists on " + - "disk."); - } - } - - public static String getContainerDbFileName(String containerName) { - return containerName + OzoneConsts.DN_CONTAINER_DB; - } - - /** - * Persistent a {@link DatanodeDetails} to a local file. - * - * @throws IOException when read/write error occurs - */ - public synchronized static void writeDatanodeDetailsTo( - DatanodeDetails datanodeDetails, File path) throws IOException { - if (path.exists()) { - if (!path.delete() || !path.createNewFile()) { - throw new IOException("Unable to overwrite the datanode ID file."); - } - } else { - if (!path.getParentFile().exists() && - !path.getParentFile().mkdirs()) { - throw new IOException("Unable to create datanode ID directories."); - } - } - DatanodeIdYaml.createDatanodeIdFile(datanodeDetails, path); - } - - /** - * Read {@link DatanodeDetails} from a local ID file. - * - * @param path ID file local path - * @return {@link DatanodeDetails} - * @throws IOException If the id file is malformed or other I/O exceptions - */ - public synchronized static DatanodeDetails readDatanodeDetailsFrom(File path) - throws IOException { - if (!path.exists()) { - throw new IOException("Datanode ID file not found."); - } - try { - return DatanodeIdYaml.readDatanodeIdFile(path); - } catch (IOException e) { - LOG.warn("Error loading DatanodeDetails yaml from " + - path.getAbsolutePath(), e); - // Try to load as protobuf before giving up - try (FileInputStream in = new FileInputStream(path)) { - return DatanodeDetails.getFromProtoBuf( - HddsProtos.DatanodeDetailsProto.parseFrom(in)); - } catch (IOException io) { - throw new IOException("Failed to parse DatanodeDetails from " - + path.getAbsolutePath(), io); - } - } - } - - /** - * Verify that the checksum stored in containerData is equal to the - * computed checksum. - * @param containerData - * @throws IOException - */ - public static void verifyChecksum(ContainerData containerData) - throws IOException { - String storedChecksum = containerData.getChecksum(); - - Yaml yaml = ContainerDataYaml.getYamlForContainerType( - containerData.getContainerType()); - containerData.computeAndSetChecksum(yaml); - String computedChecksum = containerData.getChecksum(); - - if (storedChecksum == null || !storedChecksum.equals(computedChecksum)) { - throw new StorageContainerException("Container checksum error for " + - "ContainerID: " + containerData.getContainerID() + ". " + - "\nStored Checksum: " + storedChecksum + - "\nExpected Checksum: " + computedChecksum, - CONTAINER_CHECKSUM_ERROR); - } - } - - /** - * Return the SHA-256 chesksum of the containerData. - * @param containerDataYamlStr ContainerData as a Yaml String - * @return Checksum of the container data - * @throws StorageContainerException - */ - public static String getChecksum(String containerDataYamlStr) - throws StorageContainerException { - MessageDigest sha; - try { - sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); - sha.update(containerDataYamlStr.getBytes(CHARSET_ENCODING)); - return DigestUtils.sha256Hex(sha.digest()); - } catch (NoSuchAlgorithmException e) { - throw new StorageContainerException("Unable to create Message Digest, " + - "usually this is a java configuration issue.", NO_SUCH_ALGORITHM); - } - } - - /** - * Get the .container file from the containerBaseDir. - * @param containerBaseDir container base directory. 
The name of this - * directory is same as the containerID - * @return the .container file - */ - public static File getContainerFile(File containerBaseDir) { - // Container file layout is - // .../<>/metadata/<>.container - String containerFilePath = OzoneConsts.CONTAINER_META_PATH + File.separator - + getContainerID(containerBaseDir) + OzoneConsts.CONTAINER_EXTENSION; - return new File(containerBaseDir, containerFilePath); - } - - /** - * ContainerID can be decoded from the container base directory name. - */ - public static long getContainerID(File containerBaseDir) { - return Long.parseLong(containerBaseDir.getName()); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java deleted file mode 100644 index d3efa98795a98..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.util.LinkedHashMap; -import java.util.Map; - -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.yaml.snakeyaml.DumperOptions; -import org.yaml.snakeyaml.Yaml; - -/** - * Class for creating datanode.id file in yaml format. - */ -public final class DatanodeIdYaml { - - private DatanodeIdYaml() { - // static helper methods only, no state. - } - - /** - * Creates a yaml file using DatnodeDetails. This method expects the path - * validation to be performed by the caller. - * - * @param datanodeDetails {@link DatanodeDetails} - * @param path Path to datnode.id file - */ - public static void createDatanodeIdFile(DatanodeDetails datanodeDetails, - File path) throws IOException { - DumperOptions options = new DumperOptions(); - options.setPrettyFlow(true); - options.setDefaultFlowStyle(DumperOptions.FlowStyle.FLOW); - Yaml yaml = new Yaml(options); - - try (Writer writer = new OutputStreamWriter( - new FileOutputStream(path), "UTF-8")) { - yaml.dump(getDatanodeDetailsYaml(datanodeDetails), writer); - } - } - - /** - * Read datanode.id from file. - */ - public static DatanodeDetails readDatanodeIdFile(File path) - throws IOException { - DatanodeDetails datanodeDetails; - try (FileInputStream inputFileStream = new FileInputStream(path)) { - Yaml yaml = new Yaml(); - DatanodeDetailsYaml datanodeDetailsYaml; - try { - datanodeDetailsYaml = - yaml.loadAs(inputFileStream, DatanodeDetailsYaml.class); - } catch (Exception e) { - throw new IOException("Unable to parse yaml file.", e); - } - - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(datanodeDetailsYaml.getUuid()) - .setIpAddress(datanodeDetailsYaml.getIpAddress()) - .setHostName(datanodeDetailsYaml.getHostName()) - .setCertSerialId(datanodeDetailsYaml.getCertSerialId()); - - if (!MapUtils.isEmpty(datanodeDetailsYaml.getPortDetails())) { - for (Map.Entry portEntry : - datanodeDetailsYaml.getPortDetails().entrySet()) { - builder.addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.valueOf(portEntry.getKey()), - portEntry.getValue())); - } - } - datanodeDetails = builder.build(); - } - - return datanodeDetails; - } - - /** - * Datanode details bean to be written to the yaml file. - */ - public static class DatanodeDetailsYaml { - private String uuid; - private String ipAddress; - private String hostName; - private String certSerialId; - private Map portDetails; - - public DatanodeDetailsYaml() { - // Needed for snake-yaml introspection. 
- } - - private DatanodeDetailsYaml(String uuid, String ipAddress, - String hostName, String certSerialId, - Map portDetails) { - this.uuid = uuid; - this.ipAddress = ipAddress; - this.hostName = hostName; - this.certSerialId = certSerialId; - this.portDetails = portDetails; - } - - public String getUuid() { - return uuid; - } - - public String getIpAddress() { - return ipAddress; - } - - public String getHostName() { - return hostName; - } - - public String getCertSerialId() { - return certSerialId; - } - - public Map getPortDetails() { - return portDetails; - } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public void setIpAddress(String ipAddress) { - this.ipAddress = ipAddress; - } - - public void setHostName(String hostName) { - this.hostName = hostName; - } - - public void setCertSerialId(String certSerialId) { - this.certSerialId = certSerialId; - } - - public void setPortDetails(Map portDetails) { - this.portDetails = portDetails; - } - } - - private static DatanodeDetailsYaml getDatanodeDetailsYaml( - DatanodeDetails datanodeDetails) { - - Map portDetails = new LinkedHashMap<>(); - if (!CollectionUtils.isEmpty(datanodeDetails.getPorts())) { - for (DatanodeDetails.Port port : datanodeDetails.getPorts()) { - portDetails.put(port.getName().toString(), port.getValue()); - } - } - - return new DatanodeDetailsYaml( - datanodeDetails.getUuid().toString(), - datanodeDetails.getIpAddress(), - datanodeDetails.getHostName(), - datanodeDetails.getCertSerialId(), - portDetails); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java deleted file mode 100644 index 4db6d3120fdf1..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.hadoop.ozone.OzoneConsts; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.util.Properties; - -/** - * This is a utility class which helps to create the version file on datanode - * and also validate the content of the version file. - */ -public class DatanodeVersionFile { - - private final String storageId; - private final String clusterId; - private final String datanodeUuid; - private final long cTime; - private final int layOutVersion; - - public DatanodeVersionFile(String storageId, String clusterId, - String datanodeUuid, long cTime, int layOutVersion) { - this.storageId = storageId; - this.clusterId = clusterId; - this.datanodeUuid = datanodeUuid; - this.cTime = cTime; - this.layOutVersion = layOutVersion; - } - - private Properties createProperties() { - Properties properties = new Properties(); - properties.setProperty(OzoneConsts.STORAGE_ID, storageId); - properties.setProperty(OzoneConsts.CLUSTER_ID, clusterId); - properties.setProperty(OzoneConsts.DATANODE_UUID, datanodeUuid); - properties.setProperty(OzoneConsts.CTIME, String.valueOf(cTime)); - properties.setProperty(OzoneConsts.LAYOUTVERSION, String.valueOf( - layOutVersion)); - return properties; - } - - /** - * Creates a version File in specified path. - * @param path - * @throws IOException - */ - public void createVersionFile(File path) throws - IOException { - try (RandomAccessFile file = new RandomAccessFile(path, "rws"); - FileOutputStream out = new FileOutputStream(file.getFD())) { - file.getChannel().truncate(0); - Properties properties = createProperties(); - /* - * If server is interrupted before this line, - * the version file will remain unchanged. - */ - properties.store(out, null); - } - } - - - /** - * Creates a property object from the specified file content. - * @param versionFile - * @return Properties - * @throws IOException - */ - public static Properties readFrom(File versionFile) throws IOException { - try (RandomAccessFile file = new RandomAccessFile(versionFile, "rws"); - FileInputStream in = new FileInputStream(file.getFD())) { - Properties props = new Properties(); - props.load(in); - return props; - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java deleted file mode 100644 index 9d0ec957f28fd..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; - -import com.google.common.collect.Maps; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.util.StringUtils; - -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * A helper class to wrap the info about under deletion container blocks. - */ -public final class DeletedContainerBlocksSummary { - - private final List blocks; - // key : txID - // value : times of this tx has been processed - private final Map txSummary; - // key : container name - // value : the number of blocks need to be deleted in this container - // if the message contains multiple entries for same block, - // blocks will be merged - private final Map blockSummary; - // total number of blocks in this message - private int numOfBlocks; - - private DeletedContainerBlocksSummary(List blocks) { - this.blocks = blocks; - txSummary = Maps.newHashMap(); - blockSummary = Maps.newHashMap(); - blocks.forEach(entry -> { - txSummary.put(entry.getTxID(), entry.getCount()); - if (blockSummary.containsKey(entry.getContainerID())) { - blockSummary.put(entry.getContainerID(), - blockSummary.get(entry.getContainerID()) - + entry.getLocalIDCount()); - } else { - blockSummary.put(entry.getContainerID(), entry.getLocalIDCount()); - } - numOfBlocks += entry.getLocalIDCount(); - }); - } - - public static DeletedContainerBlocksSummary getFrom( - List blocks) { - return new DeletedContainerBlocksSummary(blocks); - } - - public int getNumOfBlocks() { - return numOfBlocks; - } - - public int getNumOfContainers() { - return blockSummary.size(); - } - - public String getTXIDs() { - return String.join(",", txSummary.keySet() - .stream().map(String::valueOf).collect(Collectors.toList())); - } - - public String getTxIDSummary() { - List txSummaryEntry = txSummary.entrySet().stream() - .map(entry -> entry.getKey() + "(" + entry.getValue() + ")") - .collect(Collectors.toList()); - return "[" + String.join(",", txSummaryEntry) + "]"; - } - - @Override public String toString() { - StringBuffer sb = new StringBuffer(); - for (DeletedBlocksTransaction blks : blocks) { - sb.append(" ") - .append("TXID=") - .append(blks.getTxID()) - .append(", ") - .append("TimesProceed=") - .append(blks.getCount()) - .append(", ") - .append(blks.getContainerID()) - .append(" : [") - .append(StringUtils.join(',', blks.getLocalIDList())).append("]") - .append("\n"); - } - return sb.toString(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java deleted file mode 100644 index 21f31e1bbe3ff..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; -/** - Contains protocol buffer helper classes and utilites used in - impl. - **/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java deleted file mode 100644 index d1b1bd664935b..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - - -import com.google.common.base.Preconditions; - -/** - * Defines layout versions for the Chunks. - */ - -public final class ChunkLayOutVersion { - - private final static ChunkLayOutVersion[] CHUNK_LAYOUT_VERSION_INFOS = - {new ChunkLayOutVersion(1, "Data without checksums.")}; - - private int version; - private String description; - - - /** - * Never created outside this class. - * - * @param description -- description - * @param version -- version number - */ - private ChunkLayOutVersion(int version, String description) { - this.version = version; - this.description = description; - } - - /** - * Return ChunkLayOutVersion object for the chunkVersion. - * @param chunkVersion - * @return ChunkLayOutVersion - */ - public static ChunkLayOutVersion getChunkLayOutVersion(int chunkVersion) { - Preconditions.checkArgument((chunkVersion <= ChunkLayOutVersion - .getLatestVersion().getVersion())); - for(ChunkLayOutVersion chunkLayOutVersion : CHUNK_LAYOUT_VERSION_INFOS) { - if(chunkLayOutVersion.getVersion() == chunkVersion) { - return chunkLayOutVersion; - } - } - return null; - } - - /** - * Returns all versions. - * - * @return Version info array. - */ - public static ChunkLayOutVersion[] getAllVersions() { - return CHUNK_LAYOUT_VERSION_INFOS.clone(); - } - - /** - * Returns the latest version. 
- * - * @return versionInfo - */ - public static ChunkLayOutVersion getLatestVersion() { - return CHUNK_LAYOUT_VERSION_INFOS[CHUNK_LAYOUT_VERSION_INFOS.length - 1]; - } - - /** - * Return version. - * - * @return int - */ - public int getVersion() { - return version; - } - - /** - * Returns description. - * @return String - */ - public String getDescription() { - return description; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java deleted file mode 100644 index 85738e240974a..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java +++ /dev/null @@ -1,560 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.List; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; - -import java.util.Collections; -import java.util.Map; -import java.util.TreeMap; -import java.util.concurrent.atomic.AtomicLong; -import org.yaml.snakeyaml.Yaml; - -import static org.apache.hadoop.ozone.OzoneConsts.CHECKSUM; -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID; -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE; -import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION; -import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE; -import static org.apache.hadoop.ozone.OzoneConsts.METADATA; -import static org.apache.hadoop.ozone.OzoneConsts.ORIGIN_NODE_ID; -import static org.apache.hadoop.ozone.OzoneConsts.ORIGIN_PIPELINE_ID; -import static org.apache.hadoop.ozone.OzoneConsts.STATE; - -/** - * ContainerData is the in-memory representation of container metadata and is - * represented on disk by the .container file. - */ -public abstract class ContainerData { - - //Type of the container. - // For now, we support only KeyValueContainer. - private final ContainerType containerType; - - // Unique identifier for the container - private final long containerID; - - // Layout version of the container data - private final int layOutVersion; - - // Metadata of the container will be a key value pair. 
- // This can hold information like volume name, owner etc., - private final Map metadata; - - // State of the Container - private ContainerDataProto.State state; - - private final long maxSize; - - private boolean committedSpace; - - //ID of the pipeline where this container is created - private String originPipelineId; - //ID of the datanode where this container is created - private String originNodeId; - - /** parameters for read/write statistics on the container. **/ - private final AtomicLong readBytes; - private final AtomicLong writeBytes; - private final AtomicLong readCount; - private final AtomicLong writeCount; - private final AtomicLong bytesUsed; - private final AtomicLong keyCount; - - private HddsVolume volume; - - private String checksum; - public static final Charset CHARSET_ENCODING = Charset.forName("UTF-8"); - private static final String DUMMY_CHECKSUM = new String(new byte[64], - CHARSET_ENCODING); - - // Common Fields need to be stored in .container file. - protected static final List YAML_FIELDS = - Collections.unmodifiableList(Lists.newArrayList( - CONTAINER_TYPE, - CONTAINER_ID, - LAYOUTVERSION, - STATE, - METADATA, - MAX_SIZE, - CHECKSUM, - ORIGIN_PIPELINE_ID, - ORIGIN_NODE_ID)); - - /** - * Creates a ContainerData Object, which holds metadata of the container. - * @param type - ContainerType - * @param containerId - ContainerId - * @param size - container maximum size in bytes - * @param originPipelineId - Pipeline Id where this container is/was created - * @param originNodeId - Node Id where this container is/was created - */ - protected ContainerData(ContainerType type, long containerId, long size, - String originPipelineId, String originNodeId) { - this(type, containerId, ChunkLayOutVersion.getLatestVersion().getVersion(), - size, originPipelineId, originNodeId); - } - - /** - * Creates a ContainerData Object, which holds metadata of the container. - * @param type - ContainerType - * @param containerId - ContainerId - * @param layOutVersion - Container layOutVersion - * @param size - Container maximum size in bytes - * @param originPipelineId - Pipeline Id where this container is/was created - * @param originNodeId - Node Id where this container is/was created - */ - protected ContainerData(ContainerType type, long containerId, - int layOutVersion, long size, String originPipelineId, - String originNodeId) { - Preconditions.checkNotNull(type); - - this.containerType = type; - this.containerID = containerId; - this.layOutVersion = layOutVersion; - this.metadata = new TreeMap<>(); - this.state = ContainerDataProto.State.OPEN; - this.readCount = new AtomicLong(0L); - this.readBytes = new AtomicLong(0L); - this.writeCount = new AtomicLong(0L); - this.writeBytes = new AtomicLong(0L); - this.bytesUsed = new AtomicLong(0L); - this.keyCount = new AtomicLong(0L); - this.maxSize = size; - this.originPipelineId = originPipelineId; - this.originNodeId = originNodeId; - setChecksumTo0ByteArray(); - } - - /** - * Returns the containerID. - */ - public long getContainerID() { - return containerID; - } - - /** - * Returns the path to base dir of the container. - * @return Path to base dir. - */ - public abstract String getContainerPath(); - - /** - * Returns the type of the container. - * @return ContainerType - */ - public ContainerType getContainerType() { - return containerType; - } - - - /** - * Returns the state of the container. 
- * @return ContainerLifeCycleState - */ - public synchronized ContainerDataProto.State getState() { - return state; - } - - /** - * Set the state of the container. - * @param state - */ - public synchronized void setState(ContainerDataProto.State state) { - ContainerDataProto.State oldState = this.state; - this.state = state; - - if ((oldState == ContainerDataProto.State.OPEN) && - (state != oldState)) { - releaseCommitSpace(); - } - - /** - * commit space when container transitions (back) to Open. - * when? perhaps closing a container threw an exception - */ - if ((state == ContainerDataProto.State.OPEN) && - (state != oldState)) { - Preconditions.checkState(getMaxSize() > 0); - commitSpace(); - } - } - - /** - * Return's maximum size of the container in bytes. - * @return maxSize in bytes - */ - public long getMaxSize() { - return maxSize; - } - - /** - * Returns the layOutVersion of the actual container data format. - * @return layOutVersion - */ - public int getLayOutVersion() { - return ChunkLayOutVersion.getChunkLayOutVersion(layOutVersion).getVersion(); - } - - /** - * Add/Update metadata. - * We should hold the container lock before updating the metadata as this - * will be persisted on disk. Unless, we are reconstructing ContainerData - * from protoBuf or from on disk .container file in which case lock is not - * required. - */ - public void addMetadata(String key, String value) { - metadata.put(key, value); - } - - /** - * Retuns metadata of the container. - * @return metadata - */ - public Map getMetadata() { - return Collections.unmodifiableMap(this.metadata); - } - - /** - * Set metadata. - * We should hold the container lock before updating the metadata as this - * will be persisted on disk. Unless, we are reconstructing ContainerData - * from protoBuf or from on disk .container file in which case lock is not - * required. - */ - public void setMetadata(Map metadataMap) { - metadata.clear(); - metadata.putAll(metadataMap); - } - - /** - * checks if the container is open. - * @return - boolean - */ - public synchronized boolean isOpen() { - return ContainerDataProto.State.OPEN == state; - } - - /** - * checks if the container is invalid. - * @return - boolean - */ - public synchronized boolean isValid() { - return !(ContainerDataProto.State.INVALID == state); - } - - /** - * checks if the container is closed. - * @return - boolean - */ - public synchronized boolean isClosed() { - return ContainerDataProto.State.CLOSED == state; - } - - /** - * checks if the container is quasi closed. - * @return - boolean - */ - public synchronized boolean isQuasiClosed() { - return ContainerDataProto.State.QUASI_CLOSED == state; - } - - /** - * checks if the container is unhealthy. - * @return - boolean - */ - public synchronized boolean isUnhealthy() { - return ContainerDataProto.State.UNHEALTHY == state; - } - - /** - * Marks this container as quasi closed. - */ - public synchronized void quasiCloseContainer() { - setState(ContainerDataProto.State.QUASI_CLOSED); - } - - /** - * Marks this container as closed. - */ - public synchronized void closeContainer() { - setState(ContainerDataProto.State.CLOSED); - } - - private void releaseCommitSpace() { - long unused = getMaxSize() - getBytesUsed(); - - // only if container size < max size - if (unused > 0 && committedSpace) { - getVolume().incCommittedBytes(0 - unused); - } - committedSpace = false; - } - - /** - * add available space in the container to the committed space in the volume. 
- * available space is the number of bytes remaining till max capacity. - */ - public void commitSpace() { - long unused = getMaxSize() - getBytesUsed(); - ContainerDataProto.State myState = getState(); - HddsVolume cVol; - - //we don't expect duplicate calls - Preconditions.checkState(!committedSpace); - - // Only Open Containers have Committed Space - if (myState != ContainerDataProto.State.OPEN) { - return; - } - - // junit tests do not always set up volume - cVol = getVolume(); - if (unused > 0 && (cVol != null)) { - cVol.incCommittedBytes(unused); - committedSpace = true; - } - } - - /** - * Get the number of bytes read from the container. - * @return the number of bytes read from the container. - */ - public long getReadBytes() { - return readBytes.get(); - } - - /** - * Increase the number of bytes read from the container. - * @param bytes number of bytes read. - */ - public void incrReadBytes(long bytes) { - this.readBytes.addAndGet(bytes); - } - - /** - * Get the number of times the container is read. - * @return the number of times the container is read. - */ - public long getReadCount() { - return readCount.get(); - } - - /** - * Increase the number of container read count by 1. - */ - public void incrReadCount() { - this.readCount.incrementAndGet(); - } - - /** - * Get the number of bytes write into the container. - * @return the number of bytes write into the container. - */ - public long getWriteBytes() { - return writeBytes.get(); - } - - /** - * Increase the number of bytes write into the container. - * Also decrement committed bytes against the bytes written. - * @param bytes the number of bytes write into the container. - */ - public void incrWriteBytes(long bytes) { - long unused = getMaxSize() - getBytesUsed(); - - this.writeBytes.addAndGet(bytes); - - // only if container size < max size - if (committedSpace && unused > 0) { - //with this write, container size might breach max size - long decrement = Math.min(bytes, unused); - this.getVolume().incCommittedBytes(0 - decrement); - } - } - - /** - * Get the number of writes into the container. - * @return the number of writes into the container. - */ - public long getWriteCount() { - return writeCount.get(); - } - - /** - * Increase the number of writes into the container by 1. - */ - public void incrWriteCount() { - this.writeCount.incrementAndGet(); - } - - /** - * Sets the number of bytes used by the container. - * @param used - */ - public void setBytesUsed(long used) { - this.bytesUsed.set(used); - } - - /** - * Get the number of bytes used by the container. - * @return the number of bytes used by the container. - */ - public long getBytesUsed() { - return bytesUsed.get(); - } - - /** - * Increase the number of bytes used by the container. - * @param used number of bytes used by the container. - * @return the current number of bytes used by the container afert increase. - */ - public long incrBytesUsed(long used) { - return this.bytesUsed.addAndGet(used); - } - - /** - * Decrease the number of bytes used by the container. - * @param reclaimed the number of bytes reclaimed from the container. - * @return the current number of bytes used by the container after decrease. - */ - public long decrBytesUsed(long reclaimed) { - return this.bytesUsed.addAndGet(-1L * reclaimed); - } - - /** - * Set the Volume for the Container. - * This should be called only from the createContainer. 
- * @param hddsVolume - */ - public void setVolume(HddsVolume hddsVolume) { - this.volume = hddsVolume; - } - - /** - * Returns the volume of the Container. - * @return HddsVolume - */ - public HddsVolume getVolume() { - return volume; - } - - /** - * Increments the number of keys in the container. - */ - public void incrKeyCount() { - this.keyCount.incrementAndGet(); - } - - /** - * Decrements number of keys in the container. - */ - public void decrKeyCount() { - this.keyCount.decrementAndGet(); - } - - /** - * Returns number of keys in the container. - * @return key count - */ - public long getKeyCount() { - return this.keyCount.get(); - } - - /** - * Set's number of keys in the container. - * @param count - */ - public void setKeyCount(long count) { - this.keyCount.set(count); - } - - public void setChecksumTo0ByteArray() { - this.checksum = DUMMY_CHECKSUM; - } - - public void setChecksum(String checkSum) { - this.checksum = checkSum; - } - - public String getChecksum() { - return this.checksum; - } - - - /** - * Returns the origin pipeline Id of this container. - * @return origin node Id - */ - public String getOriginPipelineId() { - return originPipelineId; - } - - /** - * Returns the origin node Id of this container. - * @return origin node Id - */ - public String getOriginNodeId() { - return originNodeId; - } - - /** - * Compute the checksum for ContainerData using the specified Yaml (based - * on ContainerType) and set the checksum. - * - * Checksum of ContainerData is calculated by setting the - * {@link ContainerData#checksum} field to a 64-byte array with all 0's - - * {@link ContainerData#DUMMY_CHECKSUM}. After the checksum is calculated, - * the checksum field is updated with this value. - * - * @param yaml Yaml for ContainerType to get the ContainerData as Yaml String - * @throws IOException - */ - public void computeAndSetChecksum(Yaml yaml) throws IOException { - // Set checksum to dummy value - 0 byte array, to calculate the checksum - // of rest of the data. - setChecksumTo0ByteArray(); - - // Dump yaml data into a string to compute its checksum - String containerDataYamlStr = yaml.dump(this); - - this.checksum = ContainerUtils.getChecksum(containerDataYamlStr); - } - - /** - * Returns a ProtoBuf Message from ContainerData. - * - * @return Protocol Buffer Message - */ - public abstract ContainerProtos.ContainerDataProto getProtoBufMessage(); - - /** - * Returns the blockCommitSequenceId. - */ - public abstract long getBlockCommitSequenceId(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java deleted file mode 100644 index 1f9966c1a76c4..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import java.beans.IntrospectionException; -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; - -import com.google.common.base.Preconditions; -import static org.apache.hadoop.ozone.container.keyvalue - .KeyValueContainerData.KEYVALUE_YAML_TAG; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.Yaml; -import org.yaml.snakeyaml.constructor.AbstractConstruct; -import org.yaml.snakeyaml.constructor.Constructor; -import org.yaml.snakeyaml.introspector.BeanAccess; -import org.yaml.snakeyaml.introspector.Property; -import org.yaml.snakeyaml.introspector.PropertyUtils; -import org.yaml.snakeyaml.nodes.MappingNode; -import org.yaml.snakeyaml.nodes.Node; -import org.yaml.snakeyaml.nodes.ScalarNode; -import org.yaml.snakeyaml.nodes.Tag; -import org.yaml.snakeyaml.representer.Representer; - -/** - * Class for creating and reading .container files. - */ - -public final class ContainerDataYaml { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerDataYaml.class); - - private ContainerDataYaml() { - - } - - /** - * Creates a .container file in yaml format. - * - * @param containerFile - * @param containerData - * @throws IOException - */ - public static void createContainerFile(ContainerType containerType, - ContainerData containerData, File containerFile) throws IOException { - Writer writer = null; - FileOutputStream out = null; - try { - // Create Yaml for given container type - Yaml yaml = getYamlForContainerType(containerType); - // Compute Checksum and update ContainerData - containerData.computeAndSetChecksum(yaml); - - // Write the ContainerData with checksum to Yaml file. - out = new FileOutputStream( - containerFile); - writer = new OutputStreamWriter(out, "UTF-8"); - yaml.dump(containerData, writer); - - } finally { - try { - if (writer != null) { - writer.flush(); - // make sure the container metadata is synced to disk. - out.getFD().sync(); - writer.close(); - } - } catch (IOException ex) { - LOG.warn("Error occurred during closing the writer. ContainerID: " + - containerData.getContainerID()); - } - } - } - - /** - * Read the yaml file, and return containerData. 
- * - * @throws IOException - */ - public static ContainerData readContainerFile(File containerFile) - throws IOException { - Preconditions.checkNotNull(containerFile, "containerFile cannot be null"); - try (FileInputStream inputFileStream = new FileInputStream(containerFile)) { - return readContainer(inputFileStream); - } - - } - - /** - * Read the yaml file content, and return containerData. - * - * @throws IOException - */ - public static ContainerData readContainer(byte[] containerFileContent) - throws IOException { - return readContainer( - new ByteArrayInputStream(containerFileContent)); - } - - /** - * Read the yaml content, and return containerData. - * - * @throws IOException - */ - public static ContainerData readContainer(InputStream input) - throws IOException { - - ContainerData containerData; - PropertyUtils propertyUtils = new PropertyUtils(); - propertyUtils.setBeanAccess(BeanAccess.FIELD); - propertyUtils.setAllowReadOnlyProperties(true); - - Representer representer = new ContainerDataRepresenter(); - representer.setPropertyUtils(propertyUtils); - - Constructor containerDataConstructor = new ContainerDataConstructor(); - - Yaml yaml = new Yaml(containerDataConstructor, representer); - yaml.setBeanAccess(BeanAccess.FIELD); - - containerData = (ContainerData) - yaml.load(input); - - return containerData; - } - - /** - * Given a ContainerType this method returns a Yaml representation of - * the container properties. - * - * @param containerType type of container - * @return Yamal representation of container properties - * - * @throws StorageContainerException if the type is unrecognized - */ - public static Yaml getYamlForContainerType(ContainerType containerType) - throws StorageContainerException { - PropertyUtils propertyUtils = new PropertyUtils(); - propertyUtils.setBeanAccess(BeanAccess.FIELD); - propertyUtils.setAllowReadOnlyProperties(true); - - switch (containerType) { - case KeyValueContainer: - Representer representer = new ContainerDataRepresenter(); - representer.setPropertyUtils(propertyUtils); - representer.addClassTag( - KeyValueContainerData.class, - KeyValueContainerData.KEYVALUE_YAML_TAG); - - Constructor keyValueDataConstructor = new ContainerDataConstructor(); - - return new Yaml(keyValueDataConstructor, representer); - default: - throw new StorageContainerException("Unrecognized container Type " + - "format " + containerType, ContainerProtos.Result - .UNKNOWN_CONTAINER_TYPE); - } - } - - /** - * Representer class to define which fields need to be stored in yaml file. - */ - private static class ContainerDataRepresenter extends Representer { - @Override - protected Set getProperties(Class type) - throws IntrospectionException { - Set set = super.getProperties(type); - Set filtered = new TreeSet(); - - // When a new Container type is added, we need to add what fields need - // to be filtered here - if (type.equals(KeyValueContainerData.class)) { - List yamlFields = KeyValueContainerData.getYamlFields(); - // filter properties - for (Property prop : set) { - String name = prop.getName(); - if (yamlFields.contains(name)) { - filtered.add(prop); - } - } - } - return filtered; - } - } - - /** - * Constructor class for KeyValueData, which will be used by Yaml. - */ - private static class ContainerDataConstructor extends Constructor { - ContainerDataConstructor() { - //Adding our own specific constructors for tags. 
- // When a new Container type is added, we need to add yamlConstructor - // for that - this.yamlConstructors.put( - KEYVALUE_YAML_TAG, new ConstructKeyValueContainerData()); - this.yamlConstructors.put(Tag.INT, new ConstructLong()); - } - - private class ConstructKeyValueContainerData extends AbstractConstruct { - public Object construct(Node node) { - MappingNode mnode = (MappingNode) node; - Map nodes = constructMapping(mnode); - - //Needed this, as TAG.INT type is by default converted to Long. - long layOutVersion = (long) nodes.get(OzoneConsts.LAYOUTVERSION); - int lv = (int) layOutVersion; - - long size = (long) nodes.get(OzoneConsts.MAX_SIZE); - - String originPipelineId = (String) nodes.get( - OzoneConsts.ORIGIN_PIPELINE_ID); - String originNodeId = (String) nodes.get(OzoneConsts.ORIGIN_NODE_ID); - - //When a new field is added, it needs to be added here. - KeyValueContainerData kvData = new KeyValueContainerData( - (long) nodes.get(OzoneConsts.CONTAINER_ID), lv, size, - originPipelineId, originNodeId); - - kvData.setContainerDBType((String)nodes.get( - OzoneConsts.CONTAINER_DB_TYPE)); - kvData.setMetadataPath((String) nodes.get( - OzoneConsts.METADATA_PATH)); - kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH)); - Map meta = (Map) nodes.get(OzoneConsts.METADATA); - kvData.setMetadata(meta); - kvData.setChecksum((String) nodes.get(OzoneConsts.CHECKSUM)); - String state = (String) nodes.get(OzoneConsts.STATE); - kvData - .setState(ContainerProtos.ContainerDataProto.State.valueOf(state)); - return kvData; - } - } - - //Below code is taken from snake yaml, as snakeyaml tries to fit the - // number if it fits in integer, otherwise returns long. So, slightly - // modified the code to return long in all cases. - private class ConstructLong extends AbstractConstruct { - public Object construct(Node node) { - String value = constructScalar((ScalarNode) node).toString() - .replaceAll("_", ""); - int sign = +1; - char first = value.charAt(0); - if (first == '-') { - sign = -1; - value = value.substring(1); - } else if (first == '+') { - value = value.substring(1); - } - int base = 10; - if ("0".equals(value)) { - return Long.valueOf(0); - } else if (value.startsWith("0b")) { - value = value.substring(2); - base = 2; - } else if (value.startsWith("0x")) { - value = value.substring(2); - base = 16; - } else if (value.startsWith("0")) { - value = value.substring(1); - base = 8; - } else if (value.indexOf(':') != -1) { - String[] digits = value.split(":"); - int bes = 1; - int val = 0; - for (int i = 0, j = digits.length; i < j; i++) { - val += (Long.parseLong(digits[(j - i) - 1]) * bes); - bes *= 60; - } - return createNumber(sign, String.valueOf(val), 10); - } else { - return createNumber(sign, value, 10); - } - return createNumber(sign, value, base); - } - } - - private Number createNumber(int sign, String number, int radix) { - Number result; - if (sign < 0) { - number = "-" + number; - } - result = Long.valueOf(number, radix); - return result; - } - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java deleted file mode 100644 index 41415ebe0ac3c..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.Set; -import java.util.List; -import java.util.Collections; -import java.util.Map; -import java.util.concurrent.ConcurrentNavigableMap; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.ConcurrentSkipListSet; - - -/** - * Class that manages Containers created on the datanode. - */ -public class ContainerSet { - - private static final Logger LOG = LoggerFactory.getLogger(ContainerSet.class); - - private final ConcurrentSkipListMap> containerMap = new - ConcurrentSkipListMap<>(); - private final ConcurrentSkipListSet missingContainerSet = - new ConcurrentSkipListSet<>(); - /** - * Add Container to container map. - * @param container container to be added - * @return If container is added to containerMap returns true, otherwise - * false - */ - public boolean addContainer(Container container) throws - StorageContainerException { - Preconditions.checkNotNull(container, "container cannot be null"); - - long containerId = container.getContainerData().getContainerID(); - if (containerMap.putIfAbsent(containerId, container) == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Container with container Id {} is added to containerMap", - containerId); - } - // wish we could have done this from ContainerData.setState - container.getContainerData().commitSpace(); - return true; - } else { - LOG.warn("Container already exists with container Id {}", containerId); - throw new StorageContainerException("Container already exists with " + - "container Id " + containerId, - ContainerProtos.Result.CONTAINER_EXISTS); - } - } - - /** - * Returns the Container with specified containerId. - * @param containerId ID of the container to get - * @return Container - */ - public Container getContainer(long containerId) { - Preconditions.checkState(containerId >= 0, - "Container Id cannot be negative."); - return containerMap.get(containerId); - } - - /** - * Removes the Container matching with specified containerId. 
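ContainerSet above stores its containers in a ConcurrentSkipListMap and uses putIfAbsent to detect an already-registered container id; the listContainer method further down pages through the same map with tailMap. A reduced JDK-only sketch of both idioms; the String values stand in for Container instances:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ConcurrentNavigableMap;
    import java.util.concurrent.ConcurrentSkipListMap;

    public class SkipListSetSketch {
      private final ConcurrentSkipListMap<Long, String> containers =
          new ConcurrentSkipListMap<>();

      /** Returns false instead of throwing when the id is already present. */
      boolean add(long id, String value) {
        return containers.putIfAbsent(id, value) == null;
      }

      /** Returns up to 'count' entries with key >= startId, in key order. */
      List<String> list(long startId, int count) {
        List<String> result = new ArrayList<>();
        ConcurrentNavigableMap<Long, String> tail = containers.tailMap(startId, true);
        for (String value : tail.values()) {
          if (result.size() >= count) {
            break;
          }
          result.add(value);
        }
        return result;
      }

      public static void main(String[] args) {
        SkipListSetSketch set = new SkipListSetSketch();
        set.add(1L, "one");
        set.add(2L, "two");
        set.add(2L, "duplicate");             // rejected: putIfAbsent returns the existing value
        System.out.println(set.list(2L, 10)); // [two]
      }
    }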
- * @param containerId ID of the container to remove - * @return If container is removed from containerMap returns true, otherwise - * false - */ - public boolean removeContainer(long containerId) { - Preconditions.checkState(containerId >= 0, - "Container Id cannot be negative."); - Container removed = containerMap.remove(containerId); - if (removed == null) { - LOG.debug("Container with containerId {} is not present in " + - "containerMap", containerId); - return false; - } else { - LOG.debug("Container with containerId {} is removed from containerMap", - containerId); - return true; - } - } - - /** - * Return number of containers in container map. - * @return container count - */ - @VisibleForTesting - public int containerCount() { - return containerMap.size(); - } - - /** - * Return an container Iterator over {@link ContainerSet#containerMap}. - * @return {@literal Iterator>} - */ - public Iterator> getContainerIterator() { - return containerMap.values().iterator(); - } - - /** - * Return an iterator of containers associated with the specified volume. - * - * @param volume the HDDS volume which should be used to filter containers - * @return {@literal Iterator>} - */ - public Iterator> getContainerIterator(HddsVolume volume) { - Preconditions.checkNotNull(volume); - Preconditions.checkNotNull(volume.getStorageID()); - String volumeUuid = volume.getStorageID(); - return containerMap.values().stream() - .filter(x -> volumeUuid.equals(x.getContainerData().getVolume() - .getStorageID())) - .iterator(); - } - - /** - * Return an containerMap iterator over {@link ContainerSet#containerMap}. - * @return containerMap Iterator - */ - public Iterator>> getContainerMapIterator() { - return containerMap.entrySet().iterator(); - } - - /** - * Return a copy of the containerMap. - * @return containerMap - */ - @VisibleForTesting - public Map> getContainerMapCopy() { - return ImmutableMap.copyOf(containerMap); - } - - public Map> getContainerMap() { - return Collections.unmodifiableMap(containerMap); - } - - /** - * A simple interface for container Iterations. - *

- * This call make no guarantees about consistency of the data between - * different list calls. It just returns the best known data at that point of - * time. It is possible that using this iteration you can miss certain - * container from the listing. - * - * @param startContainerId - Return containers with Id >= startContainerId. - * @param count - how many to return - * @param data - Actual containerData - */ - public void listContainer(long startContainerId, long count, - List data) throws - StorageContainerException { - Preconditions.checkNotNull(data, - "Internal assertion: data cannot be null"); - Preconditions.checkState(startContainerId >= 0, - "Start container Id cannot be negative"); - Preconditions.checkState(count > 0, - "max number of containers returned " + - "must be positive"); - LOG.debug("listContainer returns containerData starting from {} of count " + - "{}", startContainerId, count); - ConcurrentNavigableMap> map; - if (startContainerId == 0) { - map = containerMap.tailMap(containerMap.firstKey(), true); - } else { - map = containerMap.tailMap(startContainerId, true); - } - int currentCount = 0; - for (Container entry : map.values()) { - if (currentCount < count) { - data.add(entry.getContainerData()); - currentCount++; - } else { - return; - } - } - } - - /** - * Get container report. - * - * @return The container report. - */ - public ContainerReportsProto getContainerReport() throws IOException { - LOG.debug("Starting container report iteration."); - - // No need for locking since containerMap is a ConcurrentSkipListMap - // And we can never get the exact state since close might happen - // after we iterate a point. - List> containers = new ArrayList<>(containerMap.values()); - - ContainerReportsProto.Builder crBuilder = - ContainerReportsProto.newBuilder(); - - for (Container container: containers) { - crBuilder.addReports(container.getContainerReport()); - } - - return crBuilder.build(); - } - - public Set getMissingContainerSet() { - return missingContainerSet; - } - - /** - * Builds the missing container set by taking a diff between total no - * containers actually found and number of containers which actually - * got created. It also validates the BCSID stored in the snapshot file - * for each container as against what is reported in containerScan. - * This will only be called during the initialization of Datanode Service - * when it still not a part of any write Pipeline. - * @param container2BCSIDMap Map of containerId to BCSID persisted in the - * Ratis snapshot - */ - public void buildMissingContainerSetAndValidate( - Map container2BCSIDMap) { - container2BCSIDMap.entrySet().parallelStream().forEach((mapEntry) -> { - long id = mapEntry.getKey(); - if (!containerMap.containsKey(id)) { - LOG.warn("Adding container {} to missing container set.", id); - missingContainerSet.add(id); - } else { - Container container = containerMap.get(id); - long containerBCSID = container.getBlockCommitSequenceId(); - long snapshotBCSID = mapEntry.getValue(); - if (containerBCSID < snapshotBCSID) { - LOG.warn( - "Marking container {} unhealthy as reported BCSID {} is smaller" - + " than ratis snapshot recorded value {}", id, - containerBCSID, snapshotBCSID); - // just mark the container unhealthy. 
Once the DatanodeStateMachine - // thread starts it will send container report to SCM where these - // unhealthy containers would be detected - try { - container.markContainerUnhealthy(); - } catch (StorageContainerException sce) { - // The container will still be marked unhealthy in memory even if - // exception occurs. It won't accept any new transactions and will - // be handled by SCM. Eve if dn restarts, it will still be detected - // as unheathy as its BCSID won't change. - LOG.error("Unable to persist unhealthy state for container {}", id); - } - } - } - }); - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java deleted file mode 100644 index 76f6b3cd2f183..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ /dev/null @@ -1,597 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.scm.container.common.helpers - .ContainerNotOpenException; -import org.apache.hadoop.hdds.scm.container.common.helpers - .InvalidContainerStateException; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.audit.AuditEventStatus; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditLoggerType; -import org.apache.hadoop.ozone.audit.AuditMarker; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.audit.Auditor; -import org.apache.hadoop.ozone.container.common.helpers - .ContainerCommandRequestPBHelper; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis - .DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - ContainerDataProto.State; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; - -import io.opentracing.Scope; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Map; -import java.util.Optional; -import java.util.Set; - -/** - * Ozone Container dispatcher takes a call from the netty server and routes it - * to the right handler function. - */ -public class HddsDispatcher implements ContainerDispatcher, Auditor { - - static final Logger LOG = LoggerFactory.getLogger(HddsDispatcher.class); - private static final AuditLogger AUDIT = - new AuditLogger(AuditLoggerType.DNLOGGER); - private final Map handlers; - private final Configuration conf; - private final ContainerSet containerSet; - private final VolumeSet volumeSet; - private final StateContext context; - private final float containerCloseThreshold; - private String scmID; - private ContainerMetrics metrics; - - /** - * Constructs an OzoneContainer that receives calls from - * XceiverServerHandler. 
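HddsDispatcher routes each incoming command by looking up a Handler in the map passed to its constructor, one entry per container type. A minimal sketch of that dispatch-table shape; the enum and the Handler interface below are placeholders rather than the HDDS protobuf types:

    import java.util.EnumMap;
    import java.util.Map;

    public class DispatchTableSketch {
      enum ContainerType { KEY_VALUE }          // placeholder for the protobuf ContainerType enum

      interface Handler {                       // placeholder for the HDDS Handler class
        String handle(String request);
      }

      private final Map<ContainerType, Handler> handlers;

      DispatchTableSketch(Map<ContainerType, Handler> handlers) {
        this.handlers = handlers;               // injected, exactly one handler per type
      }

      String dispatch(ContainerType type, String request) {
        Handler handler = handlers.get(type);
        if (handler == null) {
          return "INTERNAL_ERROR: no handler for " + type;  // mirrors the invalid-ContainerType path
        }
        return handler.handle(request);
      }

      public static void main(String[] args) {
        Map<ContainerType, Handler> table = new EnumMap<>(ContainerType.class);
        table.put(ContainerType.KEY_VALUE, request -> "handled: " + request);
        DispatchTableSketch dispatcher = new DispatchTableSketch(table);
        System.out.println(dispatcher.dispatch(ContainerType.KEY_VALUE, "WriteChunk"));
      }
    }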
- */ - public HddsDispatcher(Configuration config, ContainerSet contSet, - VolumeSet volumes, Map handlers, - StateContext context, ContainerMetrics metrics) { - this.conf = config; - this.containerSet = contSet; - this.volumeSet = volumes; - this.context = context; - this.handlers = handlers; - this.metrics = metrics; - this.containerCloseThreshold = conf.getFloat( - HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD, - HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT); - } - - @Override - public void init() { - } - - @Override - public void shutdown() { - } - - /** - * Returns true for exceptions which can be ignored for marking the container - * unhealthy. - * @param result ContainerCommandResponse error code. - * @return true if exception can be ignored, false otherwise. - */ - private boolean canIgnoreException(Result result) { - switch (result) { - case SUCCESS: - case CONTAINER_UNHEALTHY: - case CLOSED_CONTAINER_IO: - case DELETE_ON_OPEN_CONTAINER: - return true; - default: - return false; - } - } - - @Override - public void buildMissingContainerSetAndValidate( - Map container2BCSIDMap) { - containerSet - .buildMissingContainerSetAndValidate(container2BCSIDMap); - } - - @Override - public ContainerCommandResponseProto dispatch( - ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) { - String spanName = "HddsDispatcher." + msg.getCmdType().name(); - try (Scope scope = TracingUtil - .importAndCreateScope(spanName, msg.getTraceID())) { - return dispatchRequest(msg, dispatcherContext); - } - } - - @SuppressWarnings("methodlength") - private ContainerCommandResponseProto dispatchRequest( - ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) { - Preconditions.checkNotNull(msg); - if (LOG.isTraceEnabled()) { - LOG.trace("Command {}, trace ID: {} ", msg.getCmdType().toString(), - msg.getTraceID()); - } - - AuditAction action = ContainerCommandRequestPBHelper.getAuditAction( - msg.getCmdType()); - EventType eventType = getEventType(msg); - Map params = - ContainerCommandRequestPBHelper.getAuditParams(msg); - - Container container; - ContainerType containerType; - ContainerCommandResponseProto responseProto = null; - long startTime = System.nanoTime(); - ContainerProtos.Type cmdType = msg.getCmdType(); - long containerID = msg.getContainerID(); - metrics.incContainerOpsMetrics(cmdType); - container = getContainer(containerID); - boolean isWriteStage = - (cmdType == ContainerProtos.Type.WriteChunk && dispatcherContext != null - && dispatcherContext.getStage() - == DispatcherContext.WriteChunkStage.WRITE_DATA); - boolean isWriteCommitStage = - (cmdType == ContainerProtos.Type.WriteChunk && dispatcherContext != null - && dispatcherContext.getStage() - == DispatcherContext.WriteChunkStage.COMMIT_DATA); - - // if the command gets executed other than Ratis, the default wroite stage - // is WriteChunkStage.COMBINED - boolean isCombinedStage = - cmdType == ContainerProtos.Type.WriteChunk && (dispatcherContext == null - || dispatcherContext.getStage() - == DispatcherContext.WriteChunkStage.COMBINED); - Map container2BCSIDMap = null; - if (dispatcherContext != null) { - container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap(); - } - if (isWriteCommitStage) { - // check if the container Id exist in the loaded snapshot file. if - // it does not , it infers that , this is a restart of dn where - // the we are reapplying the transaction which was not captured in the - // snapshot. 
- // just add it to the list, and remove it from missing container set - // as it might have been added in the list during "init". - Preconditions.checkNotNull(container2BCSIDMap); - if (container2BCSIDMap.get(containerID) == null) { - container2BCSIDMap - .put(containerID, container.getBlockCommitSequenceId()); - containerSet.getMissingContainerSet().remove(containerID); - } - } - if (getMissingContainerSet().contains(containerID)) { - StorageContainerException sce = new StorageContainerException( - "ContainerID " + containerID - + " has been lost and and cannot be recreated on this DataNode", - ContainerProtos.Result.CONTAINER_MISSING); - audit(action, eventType, params, AuditEventStatus.FAILURE, sce); - return ContainerUtils.logAndReturnError(LOG, sce, msg); - } - - if (cmdType != ContainerProtos.Type.CreateContainer) { - /** - * Create Container should happen only as part of Write_Data phase of - * writeChunk. - */ - if (container == null && ((isWriteStage || isCombinedStage) - || cmdType == ContainerProtos.Type.PutSmallFile)) { - // If container does not exist, create one for WriteChunk and - // PutSmallFile request - responseProto = createContainer(msg); - if (responseProto.getResult() != Result.SUCCESS) { - StorageContainerException sce = new StorageContainerException( - "ContainerID " + containerID + " creation failed", - responseProto.getResult()); - audit(action, eventType, params, AuditEventStatus.FAILURE, sce); - return ContainerUtils.logAndReturnError(LOG, sce, msg); - } - Preconditions.checkArgument(isWriteStage && container2BCSIDMap != null - || dispatcherContext == null); - if (container2BCSIDMap != null) { - // adds this container to list of containers created in the pipeline - // with initial BCSID recorded as 0. - container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0)); - } - container = getContainer(containerID); - } - - // if container not found return error - if (container == null) { - StorageContainerException sce = new StorageContainerException( - "ContainerID " + containerID + " does not exist", - ContainerProtos.Result.CONTAINER_NOT_FOUND); - audit(action, eventType, params, AuditEventStatus.FAILURE, sce); - return ContainerUtils.logAndReturnError(LOG, sce, msg); - } - containerType = getContainerType(container); - } else { - if (!msg.hasCreateContainer()) { - audit(action, eventType, params, AuditEventStatus.FAILURE, - new Exception("MALFORMED_REQUEST")); - return ContainerUtils.malformedRequest(msg); - } - containerType = msg.getCreateContainer().getContainerType(); - } - // Small performance optimization. We check if the operation is of type - // write before trying to send CloseContainerAction. - if (!HddsUtils.isReadOnly(msg)) { - sendCloseContainerActionIfNeeded(container); - } - Handler handler = getHandler(containerType); - if (handler == null) { - StorageContainerException ex = new StorageContainerException("Invalid " + - "ContainerType " + containerType, - ContainerProtos.Result.CONTAINER_INTERNAL_ERROR); - // log failure - audit(action, eventType, params, AuditEventStatus.FAILURE, ex); - return ContainerUtils.logAndReturnError(LOG, ex, msg); - } - responseProto = handler.handle(msg, container, dispatcherContext); - if (responseProto != null) { - metrics.incContainerOpsLatencies(cmdType, System.nanoTime() - startTime); - - // If the request is of Write Type and the container operation - // is unsuccessful, it implies the applyTransaction on the container - // failed. 
All subsequent transactions on the container should fail and - // hence replica will be marked unhealthy here. In this case, a close - // container action will be sent to SCM to close the container. - - // ApplyTransaction called on closed Container will fail with Closed - // container exception. In such cases, ignore the exception here - // If the container is already marked unhealthy, no need to change the - // state here. - - Result result = responseProto.getResult(); - if (cmdType == ContainerProtos.Type.CreateContainer - && result == Result.SUCCESS && dispatcherContext != null) { - Preconditions.checkNotNull(dispatcherContext.getContainer2BCSIDMap()); - container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0)); - } - if (!HddsUtils.isReadOnly(msg) && !canIgnoreException(result)) { - // If the container is open/closing and the container operation - // has failed, it should be first marked unhealthy and the initiate the - // close container action. This also implies this is the first - // transaction which has failed, so the container is marked unhealthy - // right here. - // Once container is marked unhealthy, all the subsequent write - // transactions will fail with UNHEALTHY_CONTAINER exception. - - // For container to be moved to unhealthy state here, the container can - // only be in open or closing state. - State containerState = container.getContainerData().getState(); - Preconditions.checkState( - containerState == State.OPEN || containerState == State.CLOSING); - // mark and persist the container state to be unhealthy - try { - handler.markContainerUnhealthy(container); - } catch (IOException ioe) { - // just log the error here in case marking the container fails, - // Return the actual failure response to the client - LOG.error("Failed to mark container " + containerID + " UNHEALTHY. ", - ioe); - } - // in any case, the in memory state of the container should be unhealthy - Preconditions.checkArgument( - container.getContainerData().getState() == State.UNHEALTHY); - sendCloseContainerActionIfNeeded(container); - } - - if (result == Result.SUCCESS) { - updateBCSID(container, dispatcherContext, cmdType); - audit(action, eventType, params, AuditEventStatus.SUCCESS, null); - } else { - audit(action, eventType, params, AuditEventStatus.FAILURE, - new Exception(responseProto.getMessage())); - } - - return responseProto; - } else { - // log failure - audit(action, eventType, params, AuditEventStatus.FAILURE, - new Exception("UNSUPPORTED_REQUEST")); - return ContainerUtils.unsupportedRequest(msg); - } - } - - private void updateBCSID(Container container, - DispatcherContext dispatcherContext, ContainerProtos.Type cmdType) { - if (dispatcherContext != null && (cmdType == ContainerProtos.Type.PutBlock - || cmdType == ContainerProtos.Type.PutSmallFile)) { - Preconditions.checkNotNull(container); - long bcsID = container.getBlockCommitSequenceId(); - long containerId = container.getContainerData().getContainerID(); - Map container2BCSIDMap; - container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap(); - Preconditions.checkNotNull(container2BCSIDMap); - Preconditions.checkArgument(container2BCSIDMap.containsKey(containerId)); - // updates the latest BCSID on every putBlock or putSmallFile - // transaction over Ratis. - container2BCSIDMap.computeIfPresent(containerId, (u, v) -> v = bcsID); - } - } - /** - * Create a container using the input container request. - * @param containerRequest - the container request which requires container - * to be created. 
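The container2BCSIDMap bookkeeping above uses two map idioms: putIfAbsent seeds a newly created container with BCSID 0, and computeIfPresent later bumps the recorded BCSID only for containers the map already tracks. A small JDK-only sketch of that flow with illustrative ids:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class BcsidMapSketch {
      private final Map<Long, Long> container2BCSID = new ConcurrentHashMap<>();

      /** Called when a container is created in the pipeline: record it with BCSID 0. */
      void onCreate(long containerId) {
        container2BCSID.putIfAbsent(containerId, 0L);
      }

      /** Called after a successful PutBlock/PutSmallFile: update only known containers. */
      void onCommit(long containerId, long bcsId) {
        container2BCSID.computeIfPresent(containerId, (id, old) -> bcsId);
      }

      public static void main(String[] args) {
        BcsidMapSketch sketch = new BcsidMapSketch();
        sketch.onCreate(7L);
        sketch.onCommit(7L, 42L);                     // 7 -> 42
        sketch.onCommit(8L, 99L);                     // ignored: 8 was never created here
        System.out.println(sketch.container2BCSID);   // {7=42}
      }
    }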
- * @return ContainerCommandResponseProto container command response. - */ - @VisibleForTesting - ContainerCommandResponseProto createContainer( - ContainerCommandRequestProto containerRequest) { - ContainerProtos.CreateContainerRequestProto.Builder createRequest = - ContainerProtos.CreateContainerRequestProto.newBuilder(); - ContainerType containerType = - ContainerProtos.ContainerType.KeyValueContainer; - createRequest.setContainerType(containerType); - - ContainerCommandRequestProto.Builder requestBuilder = - ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.CreateContainer) - .setContainerID(containerRequest.getContainerID()) - .setCreateContainer(createRequest.build()) - .setPipelineID(containerRequest.getPipelineID()) - .setDatanodeUuid(containerRequest.getDatanodeUuid()) - .setTraceID(containerRequest.getTraceID()); - - // TODO: Assuming the container type to be KeyValueContainer for now. - // We need to get container type from the containerRequest. - Handler handler = getHandler(containerType); - return handler.handle(requestBuilder.build(), null, null); - } - - /** - * This will be called as a part of creating the log entry during - * startTransaction in Ratis on the leader node. In such cases, if the - * container is not in open state for writing we should just fail. - * Leader will propagate the exception to client. - * @param msg container command proto - * @throws StorageContainerException In case container state is open for write - * requests and in invalid state for read requests. - */ - @Override - public void validateContainerCommand( - ContainerCommandRequestProto msg) throws StorageContainerException { - long containerID = msg.getContainerID(); - Container container = getContainer(containerID); - if (container == null) { - return; - } - ContainerType containerType = container.getContainerType(); - ContainerProtos.Type cmdType = msg.getCmdType(); - AuditAction action = - ContainerCommandRequestPBHelper.getAuditAction(cmdType); - EventType eventType = getEventType(msg); - Map params = - ContainerCommandRequestPBHelper.getAuditParams(msg); - Handler handler = getHandler(containerType); - if (handler == null) { - StorageContainerException ex = new StorageContainerException( - "Invalid " + "ContainerType " + containerType, - ContainerProtos.Result.CONTAINER_INTERNAL_ERROR); - audit(action, eventType, params, AuditEventStatus.FAILURE, ex); - throw ex; - } - - State containerState = container.getContainerState(); - if (!HddsUtils.isReadOnly(msg) && containerState != State.OPEN) { - switch (cmdType) { - case CreateContainer: - // Create Container is idempotent. There is nothing to validate. - break; - case CloseContainer: - // If the container is unhealthy, closeContainer will be rejected - // while execution. Nothing to validate here. - break; - default: - // if the container is not open, no updates can happen. 
Just throw - // an exception - ContainerNotOpenException cex = new ContainerNotOpenException( - "Container " + containerID + " in " + containerState + " state"); - audit(action, eventType, params, AuditEventStatus.FAILURE, cex); - throw cex; - } - } else if (HddsUtils.isReadOnly(msg) && containerState == State.INVALID) { - InvalidContainerStateException iex = new InvalidContainerStateException( - "Container " + containerID + " in " + containerState + " state"); - audit(action, eventType, params, AuditEventStatus.FAILURE, iex); - throw iex; - } - } - - /** - * If the container usage reaches the close threshold or the container is - * marked unhealthy we send Close ContainerAction to SCM. - * @param container current state of container - */ - private void sendCloseContainerActionIfNeeded(Container container) { - // We have to find a more efficient way to close a container. - boolean isSpaceFull = isContainerFull(container); - boolean shouldClose = isSpaceFull || isContainerUnhealthy(container); - if (shouldClose) { - ContainerData containerData = container.getContainerData(); - ContainerAction.Reason reason = - isSpaceFull ? ContainerAction.Reason.CONTAINER_FULL : - ContainerAction.Reason.CONTAINER_UNHEALTHY; - ContainerAction action = ContainerAction.newBuilder() - .setContainerID(containerData.getContainerID()) - .setAction(ContainerAction.Action.CLOSE).setReason(reason).build(); - context.addContainerActionIfAbsent(action); - } - } - - private boolean isContainerFull(Container container) { - boolean isOpen = Optional.ofNullable(container) - .map(cont -> cont.getContainerState() == ContainerDataProto.State.OPEN) - .orElse(Boolean.FALSE); - if (isOpen) { - ContainerData containerData = container.getContainerData(); - double containerUsedPercentage = - 1.0f * containerData.getBytesUsed() / containerData.getMaxSize(); - return containerUsedPercentage >= containerCloseThreshold; - } else { - return false; - } - } - - private boolean isContainerUnhealthy(Container container) { - return Optional.ofNullable(container).map( - cont -> (cont.getContainerState() == - ContainerDataProto.State.UNHEALTHY)) - .orElse(Boolean.FALSE); - } - - @Override - public Handler getHandler(ContainerProtos.ContainerType containerType) { - return handlers.get(containerType); - } - - @Override - public void setScmId(String scmId) { - Preconditions.checkNotNull(scmId, "scmId Cannot be null"); - if (this.scmID == null) { - this.scmID = scmId; - for (Map.Entry handlerMap : handlers.entrySet()) { - handlerMap.getValue().setScmID(scmID); - } - } - } - - @VisibleForTesting - public Container getContainer(long containerID) { - return containerSet.getContainer(containerID); - } - - @VisibleForTesting - public Set getMissingContainerSet() { - return containerSet.getMissingContainerSet(); - } - - private ContainerType getContainerType(Container container) { - return container.getContainerType(); - } - - @VisibleForTesting - public void setMetricsForTesting(ContainerMetrics containerMetrics) { - this.metrics = containerMetrics; - } - - private EventType getEventType(ContainerCommandRequestProto msg) { - return HddsUtils.isReadOnly(msg) ? 
EventType.READ : EventType.WRITE; - } - - private void audit(AuditAction action, EventType eventType, - Map params, AuditEventStatus result, Throwable exception){ - AuditMessage amsg; - switch (result) { - case SUCCESS: - if(eventType == EventType.READ && - AUDIT.getLogger().isInfoEnabled(AuditMarker.READ.getMarker())) { - amsg = buildAuditMessageForSuccess(action, params); - AUDIT.logReadSuccess(amsg); - } else if(eventType == EventType.WRITE && - AUDIT.getLogger().isInfoEnabled(AuditMarker.WRITE.getMarker())) { - amsg = buildAuditMessageForSuccess(action, params); - AUDIT.logWriteSuccess(amsg); - } - break; - - case FAILURE: - if(eventType == EventType.READ && - AUDIT.getLogger().isErrorEnabled(AuditMarker.READ.getMarker())) { - amsg = buildAuditMessageForFailure(action, params, exception); - AUDIT.logReadFailure(amsg); - } else if(eventType == EventType.WRITE && - AUDIT.getLogger().isErrorEnabled(AuditMarker.WRITE.getMarker())) { - amsg = buildAuditMessageForFailure(action, params, exception); - AUDIT.logWriteFailure(amsg); - } - break; - - default: - if (LOG.isDebugEnabled()) { - LOG.debug("Invalid audit event status - " + result); - } - } - } - - //TODO: use GRPC to fetch user and ip details - @Override - public AuditMessage buildAuditMessageForSuccess(AuditAction op, - Map auditMap) { - return new AuditMessage.Builder() - .setUser(null) - .atIp(null) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.SUCCESS.toString()) - .withException(null) - .build(); - } - - //TODO: use GRPC to fetch user and ip details - @Override - public AuditMessage buildAuditMessageForFailure(AuditAction op, - Map auditMap, Throwable throwable) { - return new AuditMessage.Builder() - .setUser(null) - .atIp(null) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.FAILURE.toString()) - .withException(throwable) - .build(); - } - - enum EventType { - READ, - WRITE - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java deleted file mode 100644 index b736eb536ed5e..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.function.Function; - -/** - * Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}). - * The outer container map does not entail locking for a better performance. - * The inner {@link BlockDataMap} is synchronized. - * - * This class will maintain list of open keys per container when closeContainer - * command comes, it should autocommit all open keys of a open container before - * marking the container as closed. - */ -public class OpenContainerBlockMap { - /** - * Map: localId {@literal ->} BlockData. - * - * In order to support {@link #getAll()}, the update operations are - * synchronized. - */ - static class BlockDataMap { - private final ConcurrentMap blocks = - new ConcurrentHashMap<>(); - - BlockData get(long localId) { - return blocks.get(localId); - } - - synchronized int removeAndGetSize(long localId) { - blocks.remove(localId); - return blocks.size(); - } - - synchronized BlockData computeIfAbsent( - long localId, Function f) { - return blocks.computeIfAbsent(localId, f); - } - - synchronized List getAll() { - return new ArrayList<>(blocks.values()); - } - } - - /** - * TODO : We may construct the openBlockMap by reading the Block Layout - * for each block inside a container listing all chunk files and reading the - * sizes. This will help to recreate the openKeys Map once the DataNode - * restarts. - * - * For now, we will track all open blocks of a container in the blockMap. - */ - private final ConcurrentMap containers = - new ConcurrentHashMap<>(); - - /** - * Removes the Container matching with specified containerId. - * @param containerId containerId - */ - public void removeContainer(long containerId) { - Preconditions - .checkState(containerId >= 0, "Container Id cannot be negative."); - containers.remove(containerId); - } - - public void addChunk(BlockID blockID, ChunkInfo info) { - Preconditions.checkNotNull(info); - containers.computeIfAbsent(blockID.getContainerID(), - id -> new BlockDataMap()).computeIfAbsent(blockID.getLocalID(), - id -> new BlockData(blockID)).addChunk(info); - } - - /** - * Removes the chunk from the chunkInfo list for the given block. - * @param blockID id of the block - * @param chunkInfo chunk info. - */ - public void removeChunk(BlockID blockID, ChunkInfo chunkInfo) { - Preconditions.checkNotNull(chunkInfo); - Preconditions.checkNotNull(blockID); - Optional.ofNullable(containers.get(blockID.getContainerID())) - .map(blocks -> blocks.get(blockID.getLocalID())) - .ifPresent(keyData -> keyData.removeChunk(chunkInfo)); - } - - /** - * Returns the list of open blocks to the openContainerBlockMap. - * @param containerId container id - * @return List of open blocks - */ - public List getOpenBlocks(long containerId) { - return Optional.ofNullable(containers.get(containerId)) - .map(BlockDataMap::getAll) - .orElseGet(Collections::emptyList); - } - - /** - * removes the block from the block map. 
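OpenContainerBlockMap builds its containerId -> (localId -> block) structure lazily by chaining computeIfAbsent calls, and (as shown a little further down) removes an emptied inner map by returning null from computeIfPresent. A reduced sketch of the same two-level idiom; a plain list of chunk names stands in for BlockData, and the inner map is left unsynchronized here, unlike the real BlockDataMap:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class TwoLevelMapSketch {
      // containerId -> (localId -> chunk names)
      private final ConcurrentMap<Long, ConcurrentMap<Long, List<String>>> containers =
          new ConcurrentHashMap<>();

      void addChunk(long containerId, long localId, String chunk) {
        containers.computeIfAbsent(containerId, id -> new ConcurrentHashMap<>())
            .computeIfAbsent(localId, id -> new ArrayList<>())
            .add(chunk);
      }

      /** Remove one block; drop the whole inner map once it becomes empty. */
      void removeBlock(long containerId, long localId) {
        containers.computeIfPresent(containerId, (id, blocks) -> {
          blocks.remove(localId);
          return blocks.isEmpty() ? null : blocks;   // returning null removes the outer entry
        });
      }

      public static void main(String[] args) {
        TwoLevelMapSketch sketch = new TwoLevelMapSketch();
        sketch.addChunk(1L, 100L, "chunk-0");
        sketch.removeBlock(1L, 100L);
        System.out.println(sketch.containers.isEmpty());  // true
      }
    }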
- * @param blockID - block ID - */ - public void removeFromBlockMap(BlockID blockID) { - Preconditions.checkNotNull(blockID); - containers.computeIfPresent(blockID.getContainerID(), (containerId, blocks) - -> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks); - } - - /** - * Returns true if the block exists in the map, false otherwise. - * - * @param blockID - Block ID. - * @return True, if it exists, false otherwise - */ - public boolean checkIfBlockExists(BlockID blockID) { - BlockDataMap keyDataMap = containers.get(blockID.getContainerID()); - return keyDataMap != null && keyDataMap.get(blockID.getLocalID()) != null; - } - - @VisibleForTesting - BlockDataMap getBlockDataMap(long containerId) { - return containers.get(containerId); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java deleted file mode 100644 index 4dde3d6cb7190..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.container.common.interfaces - .ContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -/** - * Randomly choosing containers for block deletion. 
- */ -public class RandomContainerDeletionChoosingPolicy - implements ContainerDeletionChoosingPolicy { - private static final Logger LOG = - LoggerFactory.getLogger(RandomContainerDeletionChoosingPolicy.class); - - @Override - public List chooseContainerForBlockDeletion(int count, - Map candidateContainers) - throws StorageContainerException { - Preconditions.checkNotNull(candidateContainers, - "Internal assertion: candidate containers cannot be null"); - - int currentCount = 0; - List result = new LinkedList<>(); - ContainerData[] values = new ContainerData[candidateContainers.size()]; - // to get a shuffle list - for (ContainerData entry : DFSUtil.shuffle( - candidateContainers.values().toArray(values))) { - if (currentCount < count) { - result.add(entry); - currentCount++; - if (LOG.isDebugEnabled()) { - LOG.debug("Select container {} for block deletion, " - + "pending deletion blocks num: {}.", - entry.getContainerID(), - ((KeyValueContainerData) entry).getNumPendingDeletionBlocks()); - } - } else { - break; - } - } - - return result; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java deleted file mode 100644 index 061d09bd4a5e9..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ /dev/null @@ -1,300 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.StorageTypeProto; -import org.apache.hadoop.ozone.container.common.interfaces - .StorageLocationReportMXBean; - -import java.io.IOException; - -/** - * Storage location stats of datanodes that provide back store for containers. 
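The random policy above shuffles the candidate containers (via DFSUtil.shuffle) and keeps the first count entries. The same selection can be sketched with Collections.shuffle; plain strings stand in for ContainerData:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class RandomChoiceSketch {
      /** Shuffle the candidates and keep at most 'count' of them. */
      static List<String> chooseForDeletion(int count, List<String> candidates) {
        List<String> shuffled = new ArrayList<>(candidates);
        Collections.shuffle(shuffled);   // random order, comparable to DFSUtil.shuffle above
        return shuffled.subList(0, Math.min(count, shuffled.size()));
      }

      public static void main(String[] args) {
        List<String> candidates = Arrays.asList("c1", "c2", "c3", "c4");
        System.out.println(chooseForDeletion(2, candidates));  // any two of the four
      }
    }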
- * - */ -public final class StorageLocationReport implements - StorageLocationReportMXBean { - - private final String id; - private final boolean failed; - private final long capacity; - private final long scmUsed; - private final long remaining; - private final StorageType storageType; - private final String storageLocation; - - private StorageLocationReport(String id, boolean failed, long capacity, - long scmUsed, long remaining, StorageType storageType, - String storageLocation) { - this.id = id; - this.failed = failed; - this.capacity = capacity; - this.scmUsed = scmUsed; - this.remaining = remaining; - this.storageType = storageType; - this.storageLocation = storageLocation; - } - - public String getId() { - return id; - } - - public boolean isFailed() { - return failed; - } - - public long getCapacity() { - return capacity; - } - - public long getScmUsed() { - return scmUsed; - } - - public long getRemaining() { - return remaining; - } - - public String getStorageLocation() { - return storageLocation; - } - - @Override - public String getStorageTypeName() { - return storageType.name(); - } - - public StorageType getStorageType() { - return storageType; - } - - - private StorageTypeProto getStorageTypeProto() throws - IOException { - StorageTypeProto storageTypeProto; - switch (getStorageType()) { - case SSD: - storageTypeProto = StorageTypeProto.SSD; - break; - case DISK: - storageTypeProto = StorageTypeProto.DISK; - break; - case ARCHIVE: - storageTypeProto = StorageTypeProto.ARCHIVE; - break; - case PROVIDED: - storageTypeProto = StorageTypeProto.PROVIDED; - break; - case RAM_DISK: - storageTypeProto = StorageTypeProto.RAM_DISK; - break; - default: - throw new IOException("Illegal Storage Type specified"); - } - return storageTypeProto; - } - - private static StorageType getStorageType(StorageTypeProto proto) throws - IOException { - StorageType storageType; - switch (proto) { - case SSD: - storageType = StorageType.SSD; - break; - case DISK: - storageType = StorageType.DISK; - break; - case ARCHIVE: - storageType = StorageType.ARCHIVE; - break; - case PROVIDED: - storageType = StorageType.PROVIDED; - break; - case RAM_DISK: - storageType = StorageType.RAM_DISK; - break; - default: - throw new IOException("Illegal Storage Type specified"); - } - return storageType; - } - - /** - * Returns the SCMStorageReport protoBuf message for the Storage Location - * report. - * @return SCMStorageReport - * @throws IOException In case, the storage type specified is invalid. - */ - public StorageReportProto getProtoBufMessage() throws IOException{ - StorageReportProto.Builder srb = StorageReportProto.newBuilder(); - return srb.setStorageUuid(getId()) - .setCapacity(getCapacity()) - .setScmUsed(getScmUsed()) - .setRemaining(getRemaining()) - .setStorageType(getStorageTypeProto()) - .setStorageLocation(getStorageLocation()) - .setFailed(isFailed()) - .build(); - } - - /** - * Returns the StorageLocationReport from the protoBuf message. 
- * @param report SCMStorageReport - * @return StorageLocationReport - * @throws IOException in case of invalid storage type - */ - - public static StorageLocationReport getFromProtobuf(StorageReportProto report) - throws IOException { - StorageLocationReport.Builder builder = StorageLocationReport.newBuilder(); - builder.setId(report.getStorageUuid()) - .setStorageLocation(report.getStorageLocation()); - if (report.hasCapacity()) { - builder.setCapacity(report.getCapacity()); - } - if (report.hasScmUsed()) { - builder.setScmUsed(report.getScmUsed()); - } - if (report.hasStorageType()) { - builder.setStorageType(getStorageType(report.getStorageType())); - } - if (report.hasRemaining()) { - builder.setRemaining(report.getRemaining()); - } - - if (report.hasFailed()) { - builder.setFailed(report.getFailed()); - } - return builder.build(); - } - - /** - * Returns StorageLocation.Builder instance. - * - * @return StorageLocation.Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Builder class for building StorageLocationReport. - */ - public static class Builder { - private String id; - private boolean failed; - private long capacity; - private long scmUsed; - private long remaining; - private StorageType storageType; - private String storageLocation; - - /** - * Sets the storageId. - * - * @param idValue storageId - * @return StorageLocationReport.Builder - */ - public Builder setId(String idValue) { - this.id = idValue; - return this; - } - - /** - * Sets whether the volume failed or not. - * - * @param failedValue whether volume failed or not - * @return StorageLocationReport.Builder - */ - public Builder setFailed(boolean failedValue) { - this.failed = failedValue; - return this; - } - - /** - * Sets the capacity of volume. - * - * @param capacityValue capacity - * @return StorageLocationReport.Builder - */ - public Builder setCapacity(long capacityValue) { - this.capacity = capacityValue; - return this; - } - /** - * Sets the scmUsed Value. - * - * @param scmUsedValue storage space used by scm - * @return StorageLocationReport.Builder - */ - public Builder setScmUsed(long scmUsedValue) { - this.scmUsed = scmUsedValue; - return this; - } - - /** - * Sets the remaining free space value. - * - * @param remainingValue remaining free space - * @return StorageLocationReport.Builder - */ - public Builder setRemaining(long remainingValue) { - this.remaining = remainingValue; - return this; - } - - /** - * Sets the storageType. - * - * @param storageTypeValue type of the storage used - * @return StorageLocationReport.Builder - */ - public Builder setStorageType(StorageType storageTypeValue) { - this.storageType = storageTypeValue; - return this; - } - - /** - * Sets the storageLocation. - * - * @param storageLocationValue location of the volume - * @return StorageLocationReport.Builder - */ - public Builder setStorageLocation(String storageLocationValue) { - this.storageLocation = storageLocationValue; - return this; - } - - /** - * Builds and returns StorageLocationReport instance. 
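Since the StorageLocationReport constructor is private, instances are assembled through the builder shown above and then converted to the protobuf report sent in datanode heartbeats. A usage sketch with made-up volume numbers, assuming the HDDS classes above are on the classpath:

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;

    public class StorageReportSketch {
      public static void main(String[] args) throws Exception {
        StorageLocationReport report = StorageLocationReport.newBuilder()
            .setId("DS-volume-1")                  // storage UUID (illustrative value)
            .setStorageLocation("/data/hdds")      // illustrative path
            .setStorageType(StorageType.DISK)
            .setCapacity(1_000_000L)
            .setScmUsed(250_000L)
            .setRemaining(750_000L)
            .setFailed(false)
            .build();

        // getProtoBufMessage() yields the StorageReportProto described above.
        System.out.println(report.getProtoBufMessage());
      }
    }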
- * - * @return StorageLocationReport - */ - public StorageLocationReport build() { - return new StorageLocationReport(id, failed, capacity, scmUsed, - remaining, storageType, storageLocation); - } - - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java deleted file mode 100644 index 41fc26716c190..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.container.common.interfaces - .ContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collections; -import java.util.Comparator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -/** - * TopN Ordered choosing policy that choosing containers based on pending - * deletion blocks' number. - */ -public class TopNOrderedContainerDeletionChoosingPolicy - implements ContainerDeletionChoosingPolicy { - private static final Logger LOG = - LoggerFactory.getLogger(TopNOrderedContainerDeletionChoosingPolicy.class); - - /** customized comparator used to compare differentiate container data. 
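The comparator defined just below orders containers by descending pending-deletion count, so after sorting, the head of the list is the best deletion candidate. An equivalent JDK-only sketch of that top-N selection; the Candidate class stands in for KeyValueContainerData:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Collectors;

    public class TopNSketch {
      static class Candidate {                     // stand-in for KeyValueContainerData
        final long id;
        final int pendingDeletionBlocks;
        Candidate(long id, int pending) {
          this.id = id;
          this.pendingDeletionBlocks = pending;
        }
      }

      /** Pick at most 'count' candidates that have the most pending deletion blocks. */
      static List<Candidate> choose(int count, List<Candidate> candidates) {
        return candidates.stream()
            .filter(c -> c.pendingDeletionBlocks > 0)
            .sorted(Comparator.comparingInt((Candidate c) -> c.pendingDeletionBlocks).reversed())
            .limit(count)
            .collect(Collectors.toList());
      }

      public static void main(String[] args) {
        List<Candidate> all = new ArrayList<>();
        all.add(new Candidate(1L, 0));
        all.add(new Candidate(2L, 5));
        all.add(new Candidate(3L, 9));
        System.out.println(choose(2, all).get(0).id);   // 3: most pending blocks first
      }
    }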
**/ - private static final Comparator - KEY_VALUE_CONTAINER_DATA_COMPARATOR = (KeyValueContainerData c1, - KeyValueContainerData c2) -> - Integer.compare(c2.getNumPendingDeletionBlocks(), - c1.getNumPendingDeletionBlocks()); - - @Override - public List chooseContainerForBlockDeletion(int count, - Map candidateContainers) - throws StorageContainerException { - Preconditions.checkNotNull(candidateContainers, - "Internal assertion: candidate containers cannot be null"); - - List result = new LinkedList<>(); - List orderedList = new LinkedList<>(); - for (ContainerData entry : candidateContainers.values()) { - orderedList.add((KeyValueContainerData)entry); - } - Collections.sort(orderedList, KEY_VALUE_CONTAINER_DATA_COMPARATOR); - - // get top N list ordered by pending deletion blocks' number - int currentCount = 0; - for (KeyValueContainerData entry : orderedList) { - if (currentCount < count) { - if (entry.getNumPendingDeletionBlocks() > 0) { - result.add(entry); - currentCount++; - if (LOG.isDebugEnabled()) { - LOG.debug( - "Select container {} for block deletion, " - + "pending deletion blocks num: {}.", - entry.getContainerID(), - entry.getNumPendingDeletionBlocks()); - } - } else { - LOG.debug("Stop looking for next container, there is no" - + " pending deletion block contained in remaining containers."); - break; - } - } else { - break; - } - } - - return result; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java deleted file mode 100644 index 16da5d9deecd2..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - -/** - This package is contains Ozone container implementation. -**/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java deleted file mode 100644 index f6931e37a4c24..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - - -import java.io.IOException; -import java.util.NoSuchElementException; - -/** - * Block Iterator for container. Each container type need to implement this - * interface. - * @param - */ -public interface BlockIterator { - - /** - * This checks if iterator has next element. If it has returns true, - * otherwise false. - * @return boolean - */ - boolean hasNext() throws IOException; - - /** - * Seek to first entry. - */ - void seekToFirst(); - - /** - * Seek to last entry. - */ - void seekToLast(); - - /** - * Get next block in the container. - * @return next block or null if there are no blocks - * @throws IOException - */ - T nextBlock() throws IOException, NoSuchElementException; - - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java deleted file mode 100644 index 7f7deaf92063a..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java +++ /dev/null @@ -1,188 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Map; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; - -import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.hdfs.util.RwLock; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; - -/** - * Interface for Container Operations. - */ -public interface Container extends RwLock { - - /** - * Creates a container. 
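BlockIterator, shown a little earlier in this hunk, is generic in the block type (the type parameter does not survive this rendering) and adds seek operations on top of the usual iteration contract. A toy list-backed implementation, for illustration only; real implementations walk a container's block metadata store:

    import java.io.IOException;
    import java.util.List;
    import java.util.NoSuchElementException;

    import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;

    /** Toy list-backed implementation of the BlockIterator contract above. */
    public class ListBlockIterator<T> implements BlockIterator<T> {
      private final List<T> blocks;
      private int next = 0;

      public ListBlockIterator(List<T> blocks) {
        this.blocks = blocks;
      }

      @Override
      public boolean hasNext() throws IOException {
        return next < blocks.size();
      }

      @Override
      public void seekToFirst() {
        next = 0;
      }

      @Override
      public void seekToLast() {
        next = blocks.isEmpty() ? 0 : blocks.size() - 1;   // position on the last block
      }

      @Override
      public T nextBlock() throws IOException, NoSuchElementException {
        if (next >= blocks.size()) {
          throw new NoSuchElementException("no more blocks");
        }
        return blocks.get(next++);
      }
    }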
- * - * @throws StorageContainerException - */ - void create(VolumeSet volumeSet, VolumeChoosingPolicy volumeChoosingPolicy, - String scmId) throws StorageContainerException; - - /** - * Deletes the container. - * - * @throws StorageContainerException - */ - void delete() throws StorageContainerException; - - /** - * Update the container. - * - * @param metaData - * @param forceUpdate if true, update container forcibly. - * @throws StorageContainerException - */ - void update(Map metaData, boolean forceUpdate) - throws StorageContainerException; - - /** - * Get metadata about the container. - * - * @return ContainerData - Container Data. - */ - CONTAINERDATA getContainerData(); - - /** - * Get the Container Lifecycle state. - * - * @return ContainerLifeCycleState - Container State. - */ - ContainerProtos.ContainerDataProto.State getContainerState(); - - /** - * Marks the container for closing. Moves the container to CLOSING state. - */ - void markContainerForClose() throws StorageContainerException; - - /** - * Marks the container replica as unhealthy. - */ - void markContainerUnhealthy() throws StorageContainerException; - - /** - * Quasi Closes a open container, if it is already closed or does not exist a - * StorageContainerException is thrown. - * - * @throws StorageContainerException - */ - void quasiClose() throws StorageContainerException; - - /** - * Closes a open/quasi closed container, if it is already closed or does not - * exist a StorageContainerException is thrown. - * - * @throws StorageContainerException - */ - void close() throws StorageContainerException; - - /** - * Return the ContainerType for the container. - */ - ContainerProtos.ContainerType getContainerType(); - - /** - * Returns containerFile. - */ - File getContainerFile(); - - /** - * updates the DeleteTransactionId. - * @param deleteTransactionId - */ - void updateDeleteTransactionId(long deleteTransactionId); - - /** - * Returns blockIterator for the container. - * @return BlockIterator - * @throws IOException - */ - BlockIterator blockIterator() throws IOException; - - /** - * Import the container from an external archive. - */ - void importContainerData(InputStream stream, - ContainerPacker packer) throws IOException; - - /** - * Export all the data of the container to one output archive with the help - * of the packer. - * - */ - void exportContainerData(OutputStream stream, - ContainerPacker packer) throws IOException; - - /** - * Returns containerReport for the container. - */ - ContainerReplicaProto getContainerReport() - throws StorageContainerException; - - /** - * updates the blockCommitSequenceId. - */ - void updateBlockCommitSequenceId(long blockCommitSequenceId); - - /** - * Returns the blockCommitSequenceId. - */ - long getBlockCommitSequenceId(); - - /** - * check and report the structural integrity of the container. - * @return true if the integrity checks pass - * Scan the container metadata to detect corruption. - */ - boolean scanMetaData(); - - /** - * Return if the container data should be checksum verified to detect - * corruption. The result depends upon the current state of the container - * (e.g. if a container is accepting writes, it may not be a good idea to - * perform checksum verification to avoid concurrency issues). - */ - boolean shouldScanData(); - - /** - * Perform checksum verification for the container data. 
- * - * @param throttler A reference of {@link DataTransferThrottler} used to - * perform I/O bandwidth throttling - * @param canceler A reference of {@link Canceler} used to cancel the - * I/O bandwidth throttling (e.g. for shutdown purpose). - * @return true if the checksum verification succeeds - * false otherwise - */ - boolean scanData(DataTransferThrottler throttler, Canceler canceler); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java deleted file mode 100644 index 84c4f903f379d..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.interfaces; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; - -import java.util.List; -import java.util.Map; - -/** - * This interface is used for choosing desired containers for - * block deletion. - */ -public interface ContainerDeletionChoosingPolicy { - - /** - * Chooses desired containers for block deletion. - * @param count - * how many to return - * @param candidateContainers - * candidate containers collection - * @return container data list - * @throws StorageContainerException - */ - List chooseContainerForBlockDeletion(int count, - Map candidateContainers) - throws StorageContainerException; - - /** - * Determine if the container has suitable type for this policy. - * @param type type of the container - * @return whether the container type suitable for this policy. - */ - default boolean isValidContainerType(ContainerProtos.ContainerType type) { - if (type == ContainerProtos.ContainerType.KeyValueContainer) { - return true; - } - return false; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java deleted file mode 100644 index ee0b6bcb20001..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
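The scanMetaData/shouldScanData/scanData Javadoc above describes a two-level scrub: metadata can always be checked, while full checksum verification is skipped when the container is still accepting writes. A hedged sketch of that decision, using a hypothetical trimmed-down interface (throttling and cancellation elided):

    // Hypothetical slice of the removed Container scrub contract.
    interface ScannableContainer {
      boolean scanMetaData();    // structural / metadata integrity check
      boolean shouldScanData();  // e.g. false while the replica still accepts writes
      boolean scanData();        // checksum verification of the block data
    }

    final class ScrubberSketch {

      /** Returns true if the replica looks healthy, false if it should be marked unhealthy. */
      static boolean scrub(ScannableContainer container) {
        if (!container.scanMetaData()) {
          return false;
        }
        // Only verify checksums when the container state allows it, to avoid
        // racing concurrent writes (as the removed Javadoc explains).
        return !container.shouldScanData() || container.scanData();
      }
    }
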
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; - -import java.util.Map; - -/** - * Dispatcher acts as the bridge between the transport layer and - * the actual container layer. This layer is capable of transforming - * protobuf objects into corresponding class and issue the function call - * into the lower layers. - * - * The reply from the request is dispatched to the client. - */ -public interface ContainerDispatcher { - /** - * Dispatches commands to container layer. - * @param msg - Command Request - * @param context - Context info related to ContainerStateMachine - * @return Command Response - */ - ContainerCommandResponseProto dispatch(ContainerCommandRequestProto msg, - DispatcherContext context); - - /** - * Validates whether the container command should be executed on the pipeline - * or not. Will be invoked by the leader node in the Ratis pipeline - * @param msg containerCommand - * @throws StorageContainerException - */ - void validateContainerCommand( - ContainerCommandRequestProto msg) throws StorageContainerException; - - /** - * Initialize the Dispatcher. - */ - void init(); - - /** - * finds and builds the missing containers in case of a lost disk etc - * in the ContainerSet. It also validates the BCSID of the containers found. - */ - void buildMissingContainerSetAndValidate(Map container2BCSIDMap); - - /** - * Shutdown Dispatcher services. - */ - void shutdown(); - - /** - * Returns the handler for the specified containerType. - * @param containerType - * @return - */ - Handler getHandler(ContainerProtos.ContainerType containerType); - - /** - * If scmId is not set, this will set scmId, otherwise it is a no-op. - * @param scmId - */ - void setScmId(String scmId); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java deleted file mode 100644 index 9c5fcea1639aa..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
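ContainerDispatcher, deleted above, is described as the bridge that turns a protobuf command into a call on the handler registered for the command's container type. A minimal routing sketch, with hypothetical request/response/handler types standing in for the protobuf messages and for Handler:

    import java.util.EnumMap;
    import java.util.Map;

    enum ContainerKind { KEY_VALUE }                       // stand-in for ContainerProtos.ContainerType
    record Command(ContainerKind kind, String payload) {}  // stand-in for ContainerCommandRequestProto
    record Reply(String body) {}                           // stand-in for ContainerCommandResponseProto

    interface KindHandler {
      Reply handle(Command command);
    }

    // Route each command to the handler registered for its container type,
    // which is the core of the removed ContainerDispatcher contract.
    final class DispatcherSketch {
      private final Map<ContainerKind, KindHandler> handlers = new EnumMap<>(ContainerKind.class);

      void register(ContainerKind kind, KindHandler handler) {
        handlers.put(kind, handler);
      }

      Reply dispatch(Command command) {
        KindHandler handler = handlers.get(command.kind());
        if (handler == null) {
          throw new IllegalArgumentException("No handler registered for " + command.kind());
        }
        return handler.handle(command);
      }
    }
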
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.interfaces; - -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; - -import java.io.IOException; -import java.nio.file.Path; - -/** - * Returns physical path locations, where the containers will be created. - */ -public interface ContainerLocationManager { - /** - * Returns the path where the container should be placed from a set of - * locations. - * - * @return A path where we should place this container and metadata. - * @throws IOException - */ - Path getContainerPath() throws IOException; - - /** - * Returns the path where the container Data file are stored. - * - * @return a path where we place the LevelDB and data files of a container. - * @throws IOException - */ - Path getDataPath(String containerName) throws IOException; - - /** - * Returns an array of storage location usage report. - * @return storage location usage report. - */ - StorageLocationReport[] getLocationReport() throws IOException; - - /** - * Supports clean shutdown of container. - * - * @throws IOException - */ - void shutdown() throws IOException; -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java deleted file mode 100644 index 97d2dc3f202df..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.interfaces; - -import java.io.IOException; - -/** - * Returns physical path locations, where the containers will be created. - */ -public interface ContainerLocationManagerMXBean { - - /** - * Returns an array of storage location usage report. - * - * @return storage location usage report. 
- */ - StorageLocationReportMXBean[] getLocationReport() throws IOException; - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerPacker.java deleted file mode 100644 index 8308c23866b8d..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerPacker.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -import org.apache.hadoop.ozone.container.common.impl.ContainerData; - -/** - * Service to pack/unpack ContainerData container data to/from a single byte - * stream. - */ -public interface ContainerPacker { - - /** - * Extract the container data to the path defined by the container. - *

- * This doesn't contain the extraction of the container descriptor file. - * - * @return the byte content of the descriptor (which won't be written to a - * file but returned). - */ - byte[] unpackContainerData(Container container, - InputStream inputStream) - throws IOException; - - /** - * Compress all the container data (chunk data, metadata db AND container - * descriptor) to one single archive. - */ - void pack(Container container, OutputStream destination) - throws IOException; - - /** - * Read the descriptor from the finished archive to get the data before - * importing the container. - */ - byte[] unpackContainerDescriptor(InputStream inputStream) - throws IOException; -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java deleted file mode 100644 index 8c3b981a09385..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; -import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; - -/** - * Dispatcher sends ContainerCommandRequests to Handler. Each Container Type - * should have an implementation for Handler. 
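ContainerPacker, removed above, archives a container's chunk data, metadata DB and descriptor into one stream, and lets the descriptor be read back on its own before a full import. The implementation named in this diff is TarContainerPacker; the sketch below only shows the shape of that contract, using java.util.zip and a hypothetical descriptor entry name rather than the real tar layout:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipInputStream;
    import java.util.zip.ZipOutputStream;

    final class PackerSketch {
      static final String DESCRIPTOR_ENTRY = "container.descriptor";  // hypothetical entry name

      // Counterpart of pack(): descriptor plus data entries in a single archive.
      static byte[] pack(String descriptor, byte[] chunkData) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ZipOutputStream zip = new ZipOutputStream(bytes)) {
          zip.putNextEntry(new ZipEntry(DESCRIPTOR_ENTRY));
          zip.write(descriptor.getBytes(StandardCharsets.UTF_8));
          zip.closeEntry();
          zip.putNextEntry(new ZipEntry("chunks/chunk-0"));
          zip.write(chunkData);
          zip.closeEntry();
        }
        return bytes.toByteArray();
      }

      // Counterpart of unpackContainerDescriptor(): return only the descriptor bytes.
      static byte[] unpackDescriptor(byte[] archive) throws IOException {
        try (ZipInputStream zip = new ZipInputStream(new ByteArrayInputStream(archive))) {
          for (ZipEntry e = zip.getNextEntry(); e != null; e = zip.getNextEntry()) {
            if (DESCRIPTOR_ENTRY.equals(e.getName())) {
              return zip.readAllBytes();  // reads to the end of the current entry
            }
          }
        }
        throw new IOException("descriptor entry not found in archive");
      }
    }
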
- */ -@SuppressWarnings("visibilitymodifier") -public abstract class Handler { - - protected final Configuration conf; - protected final ContainerSet containerSet; - protected final VolumeSet volumeSet; - protected String scmID; - protected final ContainerMetrics metrics; - - private final StateContext context; - private final DatanodeDetails datanodeDetails; - - protected Handler(Configuration config, StateContext context, - ContainerSet contSet, VolumeSet volumeSet, - ContainerMetrics containerMetrics) { - this.conf = config; - this.context = context; - this.containerSet = contSet; - this.volumeSet = volumeSet; - this.metrics = containerMetrics; - this.datanodeDetails = context.getParent().getDatanodeDetails(); - } - - public static Handler getHandlerForContainerType( - final ContainerType containerType, final Configuration config, - final StateContext context, final ContainerSet contSet, - final VolumeSet volumeSet, final ContainerMetrics metrics) { - switch (containerType) { - case KeyValueContainer: - return new KeyValueHandler(config, context, contSet, volumeSet, metrics); - default: - throw new IllegalArgumentException("Handler for ContainerType: " + - containerType + "doesn't exist."); - } - } - - /** - * Returns the Id of this datanode. - * @return datanode Id - */ - protected DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - /** - * This should be called whenever there is state change. It will trigger - * an ICR to SCM. - * - * @param container Container for which ICR has to be sent - */ - protected void sendICR(final Container container) - throws StorageContainerException { - IncrementalContainerReportProto icr = IncrementalContainerReportProto - .newBuilder() - .addReport(container.getContainerReport()) - .build(); - context.addReport(icr); - context.getParent().triggerHeartbeat(); - } - - public abstract ContainerCommandResponseProto handle( - ContainerCommandRequestProto msg, Container container, - DispatcherContext dispatcherContext); - - /** - * Imports container from a raw input stream. - */ - public abstract Container importContainer( - long containerID, - long maxSize, - String originPipelineId, - String originNodeId, - InputStream rawContainerStream, - TarContainerPacker packer) - throws IOException; - - /** - * Exports container to the output stream. - */ - public abstract void exportContainer( - Container container, - OutputStream outputStream, - TarContainerPacker packer) - throws IOException; - - /** - * Stop the Handler. - */ - public abstract void stop(); - - /** - * Marks the container for closing. Moves the container to CLOSING state. - * - * @param container container to update - * @throws IOException in case of exception - */ - public abstract void markContainerForClose(Container container) - throws IOException; - - /** - * Marks the container Unhealthy. Moves the container to UHEALTHY state. - * - * @param container container to update - * @throws IOException in case of exception - */ - public abstract void markContainerUnhealthy(Container container) - throws IOException; - - /** - * Moves the Container to QUASI_CLOSED state. - * - * @param container container to be quasi closed - * @throws IOException - */ - public abstract void quasiCloseContainer(Container container) - throws IOException; - - /** - * Moves the Container to CLOSED state. - * - * @param container container to be closed - * @throws IOException - */ - public abstract void closeContainer(Container container) - throws IOException; - - /** - * Deletes the given container. 
- * - * @param container container to be deleted - * @param force if this is set to true, we delete container without checking - * state of the container. - * @throws IOException - */ - public abstract void deleteContainer(Container container, boolean force) - throws IOException; - - public void setScmID(String scmId) { - this.scmID = scmId; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java deleted file mode 100644 index fd063678137dd..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - -/** - * Contract to define properties available on the JMX interface. - */ -public interface StorageLocationReportMXBean { - - String getId(); - - boolean isFailed(); - - long getCapacity(); - - long getScmUsed(); - - long getRemaining(); - - String getStorageLocation(); - - String getStorageTypeName(); - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/VolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/VolumeChoosingPolicy.java deleted file mode 100644 index 7de0e2a967d1f..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/VolumeChoosingPolicy.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
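Handler#sendICR, removed above, is called on every container state change: it wraps the container report in an IncrementalContainerReport, hands it to the state context, and triggers a heartbeat so SCM hears about the change without waiting for the next full report. A sketch of that pattern, with hypothetical stand-in types for the report and the heartbeat trigger:

    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;

    final class IncrementalReportSketch {

      // Hypothetical stand-in for IncrementalContainerReportProto.
      record IncrementalReport(long containerId, String newState) {}

      private final Queue<IncrementalReport> pendingReports = new ConcurrentLinkedQueue<>();
      private final Runnable triggerHeartbeat;  // e.g. wakes the heartbeat thread

      IncrementalReportSketch(Runnable triggerHeartbeat) {
        this.triggerHeartbeat = triggerHeartbeat;
      }

      // Called whenever a container changes state (closing, quasi-closed, closed, unhealthy, ...).
      void onStateChange(long containerId, String newState) {
        pendingReports.add(new IncrementalReport(containerId, newState));
        triggerHeartbeat.run();  // don't wait for the next scheduled heartbeat
      }

      Queue<IncrementalReport> pendingReports() {
        return pendingReports;
      }
    }
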
- */ -package org.apache.hadoop.ozone.container.common.interfaces; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; - -import java.io.IOException; -import java.util.List; - -/** - * This interface specifies the policy for choosing volumes to store replicas. - */ -@InterfaceAudience.Private -public interface VolumeChoosingPolicy { - - /** - * Choose a volume to place a container, - * given a list of volumes and the max container size sought for storage. - * - * The implementations of this interface must be thread-safe. - * - * @param volumes - a list of available volumes. - * @param maxContainerSize - the maximum size of the container for which a - * volume is sought. - * @return the chosen volume. - * @throws IOException when disks are unavailable or are full. - */ - HddsVolume chooseVolume(List volumes, long maxContainerSize) - throws IOException; -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java deleted file mode 100644 index d83bf95c36258..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
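VolumeChoosingPolicy, removed above, picks one volume from a list given the maximum container size, and implementations must be thread-safe. The sketch below is one possible policy (most free space first) over a hypothetical Volume record; it is not a copy of any policy shipped with the code, which works on HddsVolume:

    import java.io.IOException;
    import java.util.Comparator;
    import java.util.List;

    // Hypothetical volume type exposing only the free space this policy needs.
    record Volume(String rootDir, long availableBytes) {}

    final class MostAvailableSpaceSketch {

      // Stateless, hence trivially thread-safe, as the removed Javadoc requires.
      static Volume chooseVolume(List<Volume> volumes, long maxContainerSize) throws IOException {
        return volumes.stream()
            .filter(v -> v.availableBytes() >= maxContainerSize)
            .max(Comparator.comparingLong(Volume::availableBytes))
            .orElseThrow(() ->
                new IOException("No volume has " + maxContainerSize + " bytes available"));
      }
    }
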

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.interfaces; -/** - This package contains common ozone container interfaces. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java deleted file mode 100644 index 1638a36a13d14..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common; -/** - Common Container Layer. At this layer the abstractions are: - - 1. Containers - Both data and metadata containers. - 2. Keys - Key/Value pairs that live inside a container. - 3. Chunks - Keys can be composed of many chunks. - - Ozone uses these abstractions to build Volumes, Buckets and Keys. - - **/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java deleted file mode 100644 index f52387be19cbb..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
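The common package-info above layers the datanode abstractions as containers holding key/value entries whose values are stored as chunks. Expressed as plain, purely illustrative data types (the names are not from the codebase):

    import java.util.List;

    // Illustrative only: the layering described in the removed package-info.
    record ChunkInfo(String chunkName, long offset, long length) {}
    record KeyEntry(String keyName, List<ChunkInfo> chunks) {}
    record ContainerEntry(long containerId, List<KeyEntry> keys) {}
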
- */ -package org.apache.hadoop.ozone.container.common.report; - -import java.util.Iterator; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT; - -/** - * Publishes CommandStatusReport which will be sent to SCM as part of - * heartbeat. CommandStatusReport consist of the following information: - * - type : type of command. - * - status : status of command execution (PENDING, EXECUTED, FAILURE). - * - cmdId : Command id. - * - msg : optional message. - */ -public class CommandStatusReportPublisher extends - ReportPublisher { - - private long cmdStatusReportInterval = -1; - - @Override - protected long getReportFrequency() { - if (cmdStatusReportInterval == -1) { - cmdStatusReportInterval = getConf().getTimeDuration( - HDDS_COMMAND_STATUS_REPORT_INTERVAL, - HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval( - getConf()); - - Preconditions.checkState( - heartbeatFrequency <= cmdStatusReportInterval, - HDDS_COMMAND_STATUS_REPORT_INTERVAL + - " cannot be configured lower than heartbeat frequency."); - } - return cmdStatusReportInterval; - } - - @Override - protected CommandStatusReportsProto getReport() { - Map map = this.getContext() - .getCommandStatusMap(); - Iterator iterator = map.keySet().iterator(); - CommandStatusReportsProto.Builder builder = CommandStatusReportsProto - .newBuilder(); - - iterator.forEachRemaining(key -> { - CommandStatus cmdStatus = map.get(key); - // If status is still pending then don't remove it from map as - // CommandHandler will change its status when it works on this command. - if (!cmdStatus.getStatus().equals(Status.PENDING)) { - builder.addCmdStatus(cmdStatus.getProtoBufMessage()); - map.remove(key); - } - }); - return builder.getCmdStatusCount() > 0 ? builder.build() : null; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java deleted file mode 100644 index b92e3b0e1f42f..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
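CommandStatusReportPublisher#getReport, deleted above, walks the command-status map, reports every status that is no longer PENDING and removes it, while PENDING entries stay in the map so the command handler can keep updating them; an empty result means no report is sent that cycle. A sketch of that drain step (iterator removal is used here instead of the original keySet/remove pattern):

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class CommandStatusDrainSketch {

      enum Status { PENDING, EXECUTED, FAILED }
      record CmdStatus(long cmdId, Status status) {}

      static List<CmdStatus> drainFinished(Map<Long, CmdStatus> statusMap) {
        List<CmdStatus> report = new ArrayList<>();
        Iterator<Map.Entry<Long, CmdStatus>> it = statusMap.entrySet().iterator();
        while (it.hasNext()) {
          Map.Entry<Long, CmdStatus> entry = it.next();
          if (entry.getValue().status() != Status.PENDING) {
            report.add(entry.getValue());  // goes into the heartbeat report
            it.remove();                   // finished commands leave the map
          }
        }
        return report;                     // empty => skip sending a report this cycle
      }

      public static void main(String[] args) {
        Map<Long, CmdStatus> map = new ConcurrentHashMap<>();
        map.put(1L, new CmdStatus(1, Status.PENDING));
        map.put(2L, new CmdStatus(2, Status.EXECUTED));
        System.out.println(drainFinished(map) + ", still pending: " + map.keySet());
      }
    }
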
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.HddsServerUtil; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT; - - -/** - * Publishes ContainerReport which will be sent to SCM as part of heartbeat. - * ContainerReport consist of the following information about each containers: - * - containerID - * - size - * - used - * - keyCount - * - readCount - * - writeCount - * - readBytes - * - writeBytes - * - finalHash - * - LifeCycleState - * - */ -public class ContainerReportPublisher extends - ReportPublisher { - - private Long containerReportInterval = null; - - @Override - protected long getReportFrequency() { - if (containerReportInterval == null) { - containerReportInterval = getConf().getTimeDuration( - HDDS_CONTAINER_REPORT_INTERVAL, - HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval( - getConf()); - - Preconditions.checkState( - heartbeatFrequency <= containerReportInterval, - HDDS_CONTAINER_REPORT_INTERVAL + - " cannot be configured lower than heartbeat frequency."); - } - // Add a random delay (0~30s) on top of the container report - // interval (60s) so tha the SCM is overwhelmed by the container reports - // sent in sync. - return containerReportInterval + getRandomReportDelay(); - } - - private long getRandomReportDelay() { - return RandomUtils.nextLong(0, containerReportInterval); - } - - @Override - protected ContainerReportsProto getReport() throws IOException { - return getContext().getParent().getContainer() - .getController().getContainerReport(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java deleted file mode 100644 index 6ac99dd4d32ba..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
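ContainerReportPublisher, removed above (and PipelineReportPublisher further down), adds a random delay on top of the configured report interval so that SCM is not overwhelmed by all datanodes reporting in sync. A sketch of just that jitter, assuming nothing beyond the JDK:

    import java.util.concurrent.ThreadLocalRandom;

    final class JitteredIntervalSketch {

      // Interval plus a uniform random delay in [0, interval), mirroring
      // reportInterval + RandomUtils.nextLong(0, reportInterval) in the removed code.
      static long nextReportDelayMillis(long reportIntervalMillis) {
        return reportIntervalMillis + ThreadLocalRandom.current().nextLong(reportIntervalMillis);
      }

      public static void main(String[] args) {
        long interval = 60_000L;  // e.g. a 60 second container report interval
        System.out.println("next report in " + nextReportDelayMillis(interval) + " ms");
      }
    }
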
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.scm.HddsServerUtil; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_NODE_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_NODE_REPORT_INTERVAL_DEFAULT; - -/** - * Publishes NodeReport which will be sent to SCM as part of heartbeat. - * NodeReport consist of: - * - NodeIOStats - * - VolumeReports - */ -public class NodeReportPublisher extends ReportPublisher { - - private Long nodeReportInterval; - - @Override - protected long getReportFrequency() { - if (nodeReportInterval == null) { - nodeReportInterval = getConf().getTimeDuration( - HDDS_NODE_REPORT_INTERVAL, - HDDS_NODE_REPORT_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval( - getConf()); - - Preconditions.checkState( - heartbeatFrequency <= nodeReportInterval, - HDDS_NODE_REPORT_INTERVAL + - " cannot be configured lower than heartbeat frequency."); - } - return nodeReportInterval; - } - - @Override - protected NodeReportProto getReport() throws IOException { - return getContext().getParent().getContainer().getNodeReport(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java deleted file mode 100644 index e7f4347e9e436..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.HddsServerUtil; - -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT; - - -/** - * Publishes Pipeline which will be sent to SCM as part of heartbeat. - * PipelineReport consist of the following information about each containers: - * - pipelineID - * - */ -public class PipelineReportPublisher extends - ReportPublisher { - - private Long pipelineReportInterval = null; - - @Override - protected long getReportFrequency() { - if (pipelineReportInterval == null) { - pipelineReportInterval = getConf().getTimeDuration( - HDDS_PIPELINE_REPORT_INTERVAL, - HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval( - getConf()); - - Preconditions.checkState( - heartbeatFrequency <= pipelineReportInterval, - HDDS_PIPELINE_REPORT_INTERVAL + - " cannot be configured lower than heartbeat frequency."); - } - // Add a random delay (0~30s) on top of the pipeline report - // interval (60s) so tha the SCM is overwhelmed by the pipeline reports - // sent in sync. - return pipelineReportInterval + getRandomReportDelay(); - } - - private long getRandomReportDelay() { - return RandomUtils.nextLong(0, pipelineReportInterval); - } - - @Override - protected PipelineReportsProto getReport() { - return getContext().getParent().getContainer().getPipelineReport(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java deleted file mode 100644 index 536d4cc06b327..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java +++ /dev/null @@ -1,158 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -/** - * ReportManager is responsible for managing all the {@link ReportPublisher} - * and also provides {@link ScheduledExecutorService} to ReportPublisher - * which should be used for scheduling the reports. - */ -public final class ReportManager { - private static final Logger LOG = - LoggerFactory.getLogger(ReportManager.class); - - private final StateContext context; - private final List publishers; - private final ScheduledExecutorService executorService; - - /** - * Construction of {@link ReportManager} should be done via - * {@link ReportManager.Builder}. - * - * @param context StateContext which holds the report - * @param publishers List of publishers which generates report - */ - private ReportManager(StateContext context, - List publishers) { - this.context = context; - this.publishers = publishers; - this.executorService = HadoopExecutors.newScheduledThreadPool( - publishers.size(), - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Datanode ReportManager Thread - %d").build()); - } - - /** - * Initializes ReportManager, also initializes all the configured - * report publishers. - */ - public void init() { - for (ReportPublisher publisher : publishers) { - publisher.init(context, executorService); - } - } - - /** - * Shutdown the ReportManager. - */ - public void shutdown() { - executorService.shutdown(); - try { - executorService.awaitTermination(5, TimeUnit.SECONDS); - } catch (Exception e) { - LOG.error("Failed to shutdown Report Manager", e); - } - } - - /** - * Returns new {@link ReportManager.Builder} which can be used to construct. - * {@link ReportManager} - * @param conf - Conf - * @return builder - Builder. - */ - public static Builder newBuilder(Configuration conf) { - return new Builder(conf); - } - - /** - * Builder to construct {@link ReportManager}. - */ - public static final class Builder { - - private StateContext stateContext; - private List reportPublishers; - private ReportPublisherFactory publisherFactory; - - - private Builder(Configuration conf) { - this.reportPublishers = new ArrayList<>(); - this.publisherFactory = new ReportPublisherFactory(conf); - } - - /** - * Sets the {@link StateContext}. - * - * @param context StateContext - - * @return ReportManager.Builder - */ - public Builder setStateContext(StateContext context) { - stateContext = context; - return this; - } - - /** - * Adds publisher for the corresponding report. - * - * @param report report for which publisher needs to be added - * - * @return ReportManager.Builder - */ - public Builder addPublisherFor(Class report) { - reportPublishers.add(publisherFactory.getPublisherFor(report)); - return this; - } - - /** - * Adds new ReportPublisher to the ReportManager. 
- * - * @param publisher ReportPublisher - * - * @return ReportManager.Builder - */ - public Builder addPublisher(ReportPublisher publisher) { - reportPublishers.add(publisher); - return this; - } - - /** - * Build and returns ReportManager. - * - * @return {@link ReportManager} - */ - public ReportManager build() { - Preconditions.checkNotNull(stateContext); - return new ReportManager(stateContext, reportPublishers); - } - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java deleted file mode 100644 index e3910dbda1ae7..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine.DatanodeStates; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -/** - * Abstract class responsible for scheduling the reports based on the - * configured interval. All the ReportPublishers should extend this class. - */ -public abstract class ReportPublisher - implements Configurable, Runnable { - - private static final Logger LOG = LoggerFactory.getLogger( - ReportPublisher.class); - - private Configuration config; - private StateContext context; - private ScheduledExecutorService executor; - - /** - * Initializes ReportPublisher with stateContext and executorService. 
- * - * @param stateContext Datanode state context - * @param executorService ScheduledExecutorService to schedule reports - */ - public void init(StateContext stateContext, - ScheduledExecutorService executorService) { - this.context = stateContext; - this.executor = executorService; - this.executor.schedule(this, - getReportFrequency(), TimeUnit.MILLISECONDS); - } - - @Override - public void setConf(Configuration conf) { - config = conf; - } - - @Override - public Configuration getConf() { - return config; - } - - @Override - public void run() { - publishReport(); - if (!executor.isShutdown() || - !(context.getState() == DatanodeStates.SHUTDOWN)) { - executor.schedule(this, - getReportFrequency(), TimeUnit.MILLISECONDS); - } - } - - /** - * Generates and publishes the report to datanode state context. - */ - private void publishReport() { - try { - context.addReport(getReport()); - } catch (IOException e) { - LOG.error("Exception while publishing report.", e); - } - } - - /** - * Returns the frequency in which this particular report has to be scheduled. - * - * @return report interval in milliseconds - */ - protected abstract long getReportFrequency(); - - /** - * Generate and returns the report which has to be sent as part of heartbeat. - * - * @return datanode report - */ - protected abstract T getReport() throws IOException; - - /** - * Returns {@link StateContext}. - * - * @return stateContext report - */ - protected StateContext getContext() { - return context; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java deleted file mode 100644 index 1c456a0519b49..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto. 
- StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.util.ReflectionUtils; - -import java.util.HashMap; -import java.util.Map; - -/** - * Factory class to construct {@link ReportPublisher} for a report. - */ -public class ReportPublisherFactory { - - private final Configuration conf; - private final Map, - Class> report2publisher; - - /** - * Constructs {@link ReportPublisherFactory} instance. - * - * @param conf Configuration to be passed to the {@link ReportPublisher} - */ - public ReportPublisherFactory(Configuration conf) { - this.conf = conf; - this.report2publisher = new HashMap<>(); - - report2publisher.put(NodeReportProto.class, NodeReportPublisher.class); - report2publisher.put(ContainerReportsProto.class, - ContainerReportPublisher.class); - report2publisher.put(CommandStatusReportsProto.class, - CommandStatusReportPublisher.class); - report2publisher.put(PipelineReportsProto.class, - PipelineReportPublisher.class); - } - - /** - * Returns the ReportPublisher for the corresponding report. - * - * @param report report - * - * @return report publisher - */ - public ReportPublisher getPublisherFor( - Class report) { - Class publisherClass = - report2publisher.get(report); - if (publisherClass == null) { - throw new RuntimeException("No publisher found for report " + report); - } - return ReflectionUtils.newInstance(publisherClass, conf); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java deleted file mode 100644 index 404b37a7b08e9..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.report; -/** - * Datanode Reports: As part of heartbeat, datanode has to share its current - * state with SCM. The state of datanode is split into multiple reports which - * are sent along with heartbeat in a configured frequency. - * - * This package contains code which is responsible for sending reports from - * datanode to SCM. - * - * ReportPublisherFactory: Given a report this constructs corresponding - * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}. - * - * ReportManager: Manages and initializes all the available ReportPublishers. 
- * - * ReportPublisher: Abstract class responsible for scheduling the reports - * based on the configured interval. All the ReportPublishers should extend - * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher} - * - * How to add new report: - * - * 1. Create a new ReportPublisher class which extends - * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}. - * - * 2. Add a mapping Report to ReportPublisher entry in ReportPublisherFactory. - * - * 3. In DatanodeStateMachine add the report to ReportManager instance. - * - * - * - * Datanode Reports State Diagram: - * - * DatanodeStateMachine ReportManager ReportPublisher SCM - * | | | | - * | | | | - * | construct | | | - * |----------------->| | | - * | | | | - * | init | | | - * |----------------->| | | - * | | init | | - * | |------------->| | - * | | | | - * +--------+------------------+--------------+--------------------+------+ - * |loop | | | | | - * | | | publish | | | - * | |<-----------------+--------------| | | - * | | | report | | | - * | | | | | | - * | | | | | | - * | | heartbeat(rpc) | | | | - * | |------------------+--------------+------------------->| | - * | | | | | | - * | | | | | | - * +--------+------------------+--------------+--------------------+------+ - * | | | | - * | | | | - * | | | | - * | shutdown | | | - * |----------------->| | | - * | | | | - * | | | | - * - - - - - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java deleted file mode 100644 index c9eb7024eaf18..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ /dev/null @@ -1,489 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
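The report package-info above spells out how a new report is added: extend ReportPublisher, register it in ReportPublisherFactory, and wire it into ReportManager from DatanodeStateMachine. The base class's run() publishes one report and then re-schedules itself on the shared ScheduledExecutorService after getReportFrequency() milliseconds. A generic, self-contained sketch of that scheduling skeleton (names are illustrative and the shutdown check is reduced to a flag):

    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Self-rescheduling task in the style of the removed ReportPublisher#run.
    abstract class SelfSchedulingPublisher implements Runnable {
      private final ScheduledExecutorService executor;
      private volatile boolean stopped = false;

      protected SelfSchedulingPublisher(ScheduledExecutorService executor) {
        this.executor = executor;
      }

      void start() {
        executor.schedule(this, reportFrequencyMillis(), TimeUnit.MILLISECONDS);
      }

      void stop() {
        stopped = true;
      }

      @Override
      public void run() {
        publishReport();
        if (!stopped && !executor.isShutdown()) {
          // Schedule the next run only after this one finished, so runs never overlap.
          executor.schedule(this, reportFrequencyMillis(), TimeUnit.MILLISECONDS);
        }
      }

      protected abstract long reportFrequencyMillis();  // cf. getReportFrequency()
      protected abstract void publishReport();          // cf. getReport() + context.addReport()
    }
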
- */ -package org.apache.hadoop.ozone.container.common.statemachine; - -import java.io.Closeable; -import java.io.IOException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.ozone.HddsDatanodeStopService; -import org.apache.hadoop.ozone.container.common.report.ReportManager; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .CloseContainerCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .CommandDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .DeleteBlocksCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .DeleteContainerCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .ReplicateContainerCommandHandler; -import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.container.replication.ContainerReplicator; -import org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator; -import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; -import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.JvmPauseMonitor; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopExecutors; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * State Machine Class. - */ -public class DatanodeStateMachine implements Closeable { - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(DatanodeStateMachine.class); - private final ExecutorService executorService; - private final Configuration conf; - private final SCMConnectionManager connectionManager; - private StateContext context; - private final OzoneContainer container; - private DatanodeDetails datanodeDetails; - private final CommandDispatcher commandDispatcher; - private final ReportManager reportManager; - private long commandsHandled; - private AtomicLong nextHB; - private Thread stateMachineThread = null; - private Thread cmdProcessThread = null; - private final ReplicationSupervisor supervisor; - - private JvmPauseMonitor jvmPauseMonitor; - private CertificateClient dnCertClient; - private final HddsDatanodeStopService hddsDatanodeStopService; - - /** - * Constructs a a datanode state machine. - * @param datanodeDetails - DatanodeDetails used to identify a datanode - * @param conf - Configuration. 
- * @param certClient - Datanode Certificate client, required if security is - * enabled - */ - public DatanodeStateMachine(DatanodeDetails datanodeDetails, - Configuration conf, CertificateClient certClient, - HddsDatanodeStopService hddsDatanodeStopService) throws IOException { - this.hddsDatanodeStopService = hddsDatanodeStopService; - this.conf = conf; - this.datanodeDetails = datanodeDetails; - executorService = HadoopExecutors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Datanode State Machine Thread - %d").build()); - connectionManager = new SCMConnectionManager(conf); - context = new StateContext(this.conf, DatanodeStates.getInitState(), this); - container = new OzoneContainer(this.datanodeDetails, - new OzoneConfiguration(conf), context, certClient); - dnCertClient = certClient; - nextHB = new AtomicLong(Time.monotonicNow()); - - ContainerReplicator replicator = - new DownloadAndImportReplicator(container.getContainerSet(), - container.getController(), - new SimpleContainerDownloader(conf), new TarContainerPacker()); - - supervisor = - new ReplicationSupervisor(container.getContainerSet(), replicator, 10); - - // When we add new handlers just adding a new handler here should do the - // trick. - commandDispatcher = CommandDispatcher.newBuilder() - .addHandler(new CloseContainerCommandHandler()) - .addHandler(new DeleteBlocksCommandHandler(container.getContainerSet(), - conf)) - .addHandler(new ReplicateContainerCommandHandler(conf, supervisor)) - .addHandler(new DeleteContainerCommandHandler()) - .setConnectionManager(connectionManager) - .setContainer(container) - .setContext(context) - .build(); - - reportManager = ReportManager.newBuilder(conf) - .setStateContext(context) - .addPublisherFor(NodeReportProto.class) - .addPublisherFor(ContainerReportsProto.class) - .addPublisherFor(CommandStatusReportsProto.class) - .addPublisherFor(PipelineReportsProto.class) - .build(); - } - - /** - * - * Return DatanodeDetails if set, return null otherwise. - * - * @return DatanodeDetails - */ - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - - - /** - * Returns the Connection manager for this state machine. - * - * @return - SCMConnectionManager. - */ - public SCMConnectionManager getConnectionManager() { - return connectionManager; - } - - public OzoneContainer getContainer() { - return this.container; - } - - /** - * Runs the state machine at a fixed frequency. - */ - private void start() throws IOException { - long now = 0; - - reportManager.init(); - initCommandHandlerThread(conf); - - // Start jvm monitor - jvmPauseMonitor = new JvmPauseMonitor(); - jvmPauseMonitor.init(conf); - jvmPauseMonitor.start(); - - while (context.getState() != DatanodeStates.SHUTDOWN) { - try { - LOG.debug("Executing cycle Number : {}", context.getExecutionCount()); - long heartbeatFrequency = context.getHeartbeatFrequency(); - nextHB.set(Time.monotonicNow() + heartbeatFrequency); - context.execute(executorService, heartbeatFrequency, - TimeUnit.MILLISECONDS); - now = Time.monotonicNow(); - if (now < nextHB.get()) { - if(!Thread.interrupted()) { - Thread.sleep(nextHB.get() - now); - } - } - } catch (InterruptedException e) { - // Some one has sent interrupt signal, this could be because - // 1. Trigger heartbeat immediately - // 2. Shutdown has be initiated. 
- } catch (Exception e) { - LOG.error("Unable to finish the execution.", e); - } - } - - // If we have got some exception in stateMachine we set the state to - // shutdown to stop the stateMachine thread. Along with this we should - // also stop the datanode. - if (context.getShutdownOnError()) { - LOG.error("DatanodeStateMachine Shutdown due to an critical error"); - hddsDatanodeStopService.stopService(); - } - } - - /** - * Gets the current context. - * - * @return StateContext - */ - public StateContext getContext() { - return context; - } - - /** - * Sets the current context. - * - * @param context - Context - */ - public void setContext(StateContext context) { - this.context = context; - } - - /** - * Closes this stream and releases any system resources associated with it. If - * the stream is already closed then invoking this method has no effect. - *

As noted in {@link AutoCloseable#close()}, cases where the close may - * fail require careful attention. It is strongly advised to relinquish the - * underlying resources and to internally mark the {@code Closeable} - * as closed, prior to throwing the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - if (stateMachineThread != null) { - stateMachineThread.interrupt(); - } - if (cmdProcessThread != null) { - cmdProcessThread.interrupt(); - } - context.setState(DatanodeStates.getLastState()); - executorService.shutdown(); - try { - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - executorService.shutdownNow(); - } - - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - LOG.error("Unable to shutdown state machine properly."); - } - } catch (InterruptedException e) { - LOG.error("Error attempting to shutdown.", e); - executorService.shutdownNow(); - Thread.currentThread().interrupt(); - } - - if (connectionManager != null) { - connectionManager.close(); - } - - if(container != null) { - container.stop(); - } - - if (jvmPauseMonitor != null) { - jvmPauseMonitor.stop(); - } - } - - /** - * States that a datanode can be in. GetNextState will move this enum from - * getInitState to getLastState. - */ - public enum DatanodeStates { - INIT(1), - RUNNING(2), - SHUTDOWN(3); - private final int value; - - /** - * Constructs states. - * - * @param value Enum Value - */ - DatanodeStates(int value) { - this.value = value; - } - - /** - * Returns the first State. - * - * @return First State. - */ - public static DatanodeStates getInitState() { - return INIT; - } - - /** - * The last state of endpoint states. - * - * @return last state. - */ - public static DatanodeStates getLastState() { - return SHUTDOWN; - } - - /** - * returns the numeric value associated with the endPoint. - * - * @return int. - */ - public int getValue() { - return value; - } - - /** - * Returns the next logical state that endPoint should move to. This - * function assumes the States are sequentially numbered. - * - * @return NextState. - */ - public DatanodeStates getNextState() { - if (this.value < getLastState().getValue()) { - int stateValue = this.getValue() + 1; - for (DatanodeStates iter : values()) { - if (stateValue == iter.getValue()) { - return iter; - } - } - } - return getLastState(); - } - } - - /** - * Start datanode state machine as a single thread daemon. - */ - public void startDaemon() { - Runnable startStateMachineTask = () -> { - try { - start(); - LOG.info("Ozone container server started."); - } catch (Exception ex) { - LOG.error("Unable to start the DatanodeState Machine", ex); - } - }; - stateMachineThread = new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat("Datanode State Machine Thread - %d") - .build().newThread(startStateMachineTask); - stateMachineThread.start(); - } - - /** - * Calling this will immediately trigger a heartbeat to the SCMs. - * This heartbeat will also include all the reports which are ready to - * be sent by datanode. - */ - public void triggerHeartbeat() { - stateMachineThread.interrupt(); - } - - /** - * Waits for DatanodeStateMachine to exit. - * - * @throws InterruptedException - */ - public void join() throws InterruptedException { - if (stateMachineThread != null) { - stateMachineThread.join(); - } - - if (cmdProcessThread != null) { - cmdProcessThread.join(); - } - } - - /** - * Stop the daemon thread of the datanode state machine. 
- */ - public synchronized void stopDaemon() { - try { - supervisor.stop(); - context.setState(DatanodeStates.SHUTDOWN); - reportManager.shutdown(); - this.close(); - LOG.info("Ozone container server stopped."); - } catch (IOException e) { - LOG.error("Stop ozone container server failed.", e); - } - } - - /** - * - * Check if the datanode state machine daemon is stopped. - * - * @return True if datanode state machine daemon is stopped - * and false otherwise. - */ - @VisibleForTesting - public boolean isDaemonStopped() { - return this.executorService.isShutdown() - && this.getContext().getState() == DatanodeStates.SHUTDOWN; - } - - /** - * Create a command handler thread. - * - * @param config - */ - private void initCommandHandlerThread(Configuration config) { - - /** - * Task that periodically checks if we have any outstanding commands. - * It is assumed that commands can be processed slowly and in order. - * This assumption might change in future. Right now due to this assumption - * we have single command queue process thread. - */ - Runnable processCommandQueue = () -> { - long now; - while (getContext().getState() != DatanodeStates.SHUTDOWN) { - SCMCommand command = getContext().getNextCommand(); - if (command != null) { - commandDispatcher.handle(command); - commandsHandled++; - } else { - try { - // Sleep till the next HB + 1 second. - now = Time.monotonicNow(); - if (nextHB.get() > now) { - Thread.sleep((nextHB.get() - now) + 1000L); - } - } catch (InterruptedException e) { - // Ignore this exception. - } - } - } - }; - - // We will have only one thread for command processing in a datanode. - cmdProcessThread = getCommandHandlerThread(processCommandQueue); - cmdProcessThread.start(); - } - - private Thread getCommandHandlerThread(Runnable processCommandQueue) { - Thread handlerThread = new Thread(processCommandQueue); - handlerThread.setDaemon(true); - handlerThread.setName("Command processor thread"); - handlerThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> { - // Let us just restart this thread after logging a critical error. - // if this thread is not running we cannot handle commands from SCM. - LOG.error("Critical Error : Command processor thread encountered an " + - "error. Thread: {}", t.toString(), e); - getCommandHandlerThread(processCommandQueue).start(); - }); - return handlerThread; - } - - /** - * Returns the number of commands handled by the datanode. - * @return count - */ - @VisibleForTesting - public long getCommandHandled() { - return commandsHandled; - } - - /** - * returns the Command Dispatcher. - * @return CommandDispatcher - */ - @VisibleForTesting - public CommandDispatcher getCommandDispatcher() { - return commandDispatcher; - } - - @VisibleForTesting - public ReplicationSupervisor getSupervisor() { - return supervisor; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java deleted file mode 100644 index f0064ec5d740e..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java +++ /dev/null @@ -1,296 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
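Taken together, the methods above give the datanode a small lifecycle API. A hedged usage sketch, not part of the original patch; datanodeDetails, conf, certClient and stopService are assumed to be constructed elsewhere, and certClient is only needed when security is enabled, per the constructor javadoc:

    DatanodeStateMachine stateMachine =
        new DatanodeStateMachine(datanodeDetails, conf, certClient, stopService);
    stateMachine.startDaemon();      // runs the INIT -> RUNNING cycles on a daemon thread
    stateMachine.triggerHeartbeat(); // interrupts the loop to heartbeat immediately
    // ... on shutdown:
    stateMachine.stopDaemon();       // moves the state to SHUTDOWN and closes resources
    stateMachine.join();             // waits for the state machine thread to exit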
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.time.ZonedDateTime; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import static org.apache.hadoop.hdds.scm.HddsServerUtil.getLogWarnInterval; -import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval; - -/** - * Endpoint is used as holder class that keeps state around the RPC endpoint. - */ -public class EndpointStateMachine - implements Closeable, EndpointStateMachineMBean { - static final Logger - LOG = LoggerFactory.getLogger(EndpointStateMachine.class); - private final StorageContainerDatanodeProtocolClientSideTranslatorPB endPoint; - private final AtomicLong missedCount; - private final InetSocketAddress address; - private final Lock lock; - private final Configuration conf; - private EndPointStates state; - private VersionResponse version; - private ZonedDateTime lastSuccessfulHeartbeat; - - /** - * Constructs RPC Endpoints. - * - * @param endPoint - RPC endPoint. - */ - public EndpointStateMachine(InetSocketAddress address, - StorageContainerDatanodeProtocolClientSideTranslatorPB endPoint, - Configuration conf) { - this.endPoint = endPoint; - this.missedCount = new AtomicLong(0); - this.address = address; - state = EndPointStates.getInitState(); - lock = new ReentrantLock(); - this.conf = conf; - } - - /** - * Takes a lock on this EndPoint so that other threads don't use this while we - * are trying to communicate via this endpoint. - */ - public void lock() { - lock.lock(); - } - - /** - * Unlocks this endpoint. - */ - public void unlock() { - lock.unlock(); - } - - /** - * Returns the version that we read from the server if anyone asks . - * - * @return - Version Response. - */ - public VersionResponse getVersion() { - return version; - } - - /** - * Sets the Version reponse we recieved from the SCM. - * - * @param version VersionResponse - */ - public void setVersion(VersionResponse version) { - this.version = version; - } - - /** - * Returns the current State this end point is in. - * - * @return - getState. - */ - public EndPointStates getState() { - return state; - } - - @Override - public int getVersionNumber() { - if (version != null) { - return version.getProtobufMessage().getSoftwareVersion(); - } else { - return -1; - } - } - - /** - * Sets the endpoint state. - * - * @param epState - end point state. - */ - public EndPointStates setState(EndPointStates epState) { - this.state = epState; - return this.state; - } - - /** - * Closes the connection. 
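The lock/unlock and missed-count methods on this class suggest the calling pattern for talking to an SCM through the endpoint. A hedged sketch based only on the javadoc above, not code from this patch; the actual RPC call is elided because the protocol methods are not part of this hunk:

    rpcEndpoint.lock();
    try {
      // ... perform the RPC via rpcEndpoint.getEndPoint() ...
      rpcEndpoint.zeroMissedCount();
    } catch (IOException ex) {
      rpcEndpoint.logIfNeeded(ex);   // logs only every getLogWarnInterval(conf) misses
    } finally {
      rpcEndpoint.unlock();
    }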
- * - * @throws IOException - */ - @Override - public void close() throws IOException { - if (endPoint != null) { - endPoint.close(); - } - } - - /** - * We maintain a count of how many times we missed communicating with a - * specific SCM. This is not made atomic since the access to this is always - * guarded by the read or write lock. That is, it is serialized. - */ - public void incMissed() { - this.missedCount.incrementAndGet(); - } - - /** - * Returns the value of the missed count. - * - * @return int - */ - public long getMissedCount() { - return this.missedCount.get(); - } - - @Override - public String getAddressString() { - return getAddress().toString(); - } - - public void zeroMissedCount() { - this.missedCount.set(0); - } - - /** - * Returns the InetAddress of the endPoint. - * - * @return - EndPoint. - */ - public InetSocketAddress getAddress() { - return this.address; - } - - /** - * Returns real RPC endPoint. - * - * @return rpc client. - */ - public StorageContainerDatanodeProtocolClientSideTranslatorPB - getEndPoint() { - return endPoint; - } - - /** - * Returns the string that represents this endpoint. - * - * @return - String - */ - public String toString() { - return address.toString(); - } - - /** - * Logs exception if needed. - * @param ex - Exception - */ - public void logIfNeeded(Exception ex) { - if (this.getMissedCount() % getLogWarnInterval(conf) == 0) { - LOG.error( - "Unable to communicate to SCM server at {} for past {} seconds.", - this.getAddress().getHostString() + ":" + this.getAddress().getPort(), - TimeUnit.MILLISECONDS.toSeconds( - this.getMissedCount() * getScmHeartbeatInterval(this.conf)), ex); - } - if (LOG.isTraceEnabled()) { - LOG.trace("Incrementing the Missed count. Ex : {}", ex); - } - this.incMissed(); - } - - - /** - * States that an Endpoint can be in. - *
- * This is a sorted list of states that EndPoint will traverse.
- * GetNextState will move this enum from getInitState to getLastState. - */ - public enum EndPointStates { - GETVERSION(1), - REGISTER(2), - HEARTBEAT(3), - SHUTDOWN(4); // if you add value after this please edit getLastState too. - private final int value; - - /** - * Constructs endPointStates. - * - * @param value state. - */ - EndPointStates(int value) { - this.value = value; - } - - /** - * Returns the first State. - * - * @return First State. - */ - public static EndPointStates getInitState() { - return GETVERSION; - } - - /** - * The last state of endpoint states. - * - * @return last state. - */ - public static EndPointStates getLastState() { - return SHUTDOWN; - } - - /** - * returns the numeric value associated with the endPoint. - * - * @return int. - */ - public int getValue() { - return value; - } - - /** - * Returns the next logical state that endPoint should move to. - * The next state is computed by adding 1 to the current state. - * - * @return NextState. - */ - public EndPointStates getNextState() { - if (this.getValue() < getLastState().getValue()) { - int stateValue = this.getValue() + 1; - for (EndPointStates iter : values()) { - if (stateValue == iter.getValue()) { - return iter; - } - } - } - return getLastState(); - } - } - - public long getLastSuccessfulHeartbeat() { - return lastSuccessfulHeartbeat == null ? - 0 : - lastSuccessfulHeartbeat.toEpochSecond(); - } - - public void setLastSuccessfulHeartbeat( - ZonedDateTime lastSuccessfulHeartbeat) { - this.lastSuccessfulHeartbeat = lastSuccessfulHeartbeat; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java deleted file mode 100644 index 4f64bde0b3e32..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; - - -/** - * JMX representation of an EndpointStateMachine. - */ -public interface EndpointStateMachineMBean { - - long getMissedCount(); - - String getAddressString(); - - EndpointStateMachine.EndPointStates getState(); - - int getVersionNumber(); - - long getLastSuccessfulHeartbeat(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java deleted file mode 100644 index ce31ebdf4d6e2..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.Closeable; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import static java.util.Collections.unmodifiableList; -import static org.apache.hadoop.hdds.scm.HddsServerUtil - .getScmRpcTimeOutInMilliseconds; - -/** - * SCMConnectionManager - Acts as a class that manages the membership - * information of the SCMs that we are working with. - */ -public class SCMConnectionManager - implements Closeable, SCMConnectionManagerMXBean { - private static final Logger LOG = - LoggerFactory.getLogger(SCMConnectionManager.class); - - private final ReadWriteLock mapLock; - private final Map scmMachines; - - private final int rpcTimeout; - private final Configuration conf; - private ObjectName jmxBean; - - public SCMConnectionManager(Configuration conf) { - this.mapLock = new ReentrantReadWriteLock(); - Long timeOut = getScmRpcTimeOutInMilliseconds(conf); - this.rpcTimeout = timeOut.intValue(); - this.scmMachines = new HashMap<>(); - this.conf = conf; - jmxBean = MBeans.register("HddsDatanode", - "SCMConnectionManager", - this); - } - - - /** - * Returns Config. - * - * @return ozoneConfig. - */ - public Configuration getConf() { - return conf; - } - - /** - * Get RpcTimeout. - * - * @return - Return RPC timeout. - */ - public int getRpcTimeout() { - return rpcTimeout; - } - - - /** - * Takes a read lock. - */ - public void readLock() { - this.mapLock.readLock().lock(); - } - - /** - * Releases the read lock. - */ - public void readUnlock() { - this.mapLock.readLock().unlock(); - } - - /** - * Takes the write lock. - */ - public void writeLock() { - this.mapLock.writeLock().lock(); - } - - /** - * Releases the write lock. - */ - public void writeUnlock() { - this.mapLock.writeLock().unlock(); - } - - /** - * adds a new SCM machine to the target set. - * - * @param address - Address of the SCM machine to send heatbeat to. - * @throws IOException - */ - public void addSCMServer(InetSocketAddress address) throws IOException { - writeLock(); - try { - if (scmMachines.containsKey(address)) { - LOG.warn("Trying to add an existing SCM Machine to Machines group. 
" + - "Ignoring the request."); - return; - } - RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class, - ProtobufRpcEngine.class); - long version = - RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class); - - RetryPolicy retryPolicy = - RetryPolicies.retryForeverWithFixedSleep( - 1000, TimeUnit.MILLISECONDS); - StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy( - StorageContainerDatanodeProtocolPB.class, version, - address, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), getRpcTimeout(), - retryPolicy).getProxy(); - - StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient = - new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy); - - EndpointStateMachine endPoint = - new EndpointStateMachine(address, rpcClient, conf); - scmMachines.put(address, endPoint); - } finally { - writeUnlock(); - } - } - - /** - * Removes a SCM machine for the target set. - * - * @param address - Address of the SCM machine to send heatbeat to. - * @throws IOException - */ - public void removeSCMServer(InetSocketAddress address) throws IOException { - writeLock(); - try { - if (!scmMachines.containsKey(address)) { - LOG.warn("Trying to remove a non-existent SCM machine. " + - "Ignoring the request."); - return; - } - - EndpointStateMachine endPoint = scmMachines.get(address); - endPoint.close(); - scmMachines.remove(address); - } finally { - writeUnlock(); - } - } - - /** - * Returns all known RPCEndpoints. - * - * @return - List of RPC Endpoints. - */ - public Collection getValues() { - readLock(); - try { - return unmodifiableList(new ArrayList<>(scmMachines.values())); - } finally { - readUnlock(); - } - } - - @Override - public void close() throws IOException { - getValues().forEach(endpointStateMachine - -> IOUtils.cleanupWithLogger(LOG, endpointStateMachine)); - if (jmxBean != null) { - MBeans.unregister(jmxBean); - jmxBean = null; - } - } - - @Override - public List getSCMServers() { - readLock(); - try { - return unmodifiableList(new ArrayList<>(scmMachines.values())); - } finally { - readUnlock(); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java deleted file mode 100644 index 25ef16379a6f2..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; - -import java.util.List; - -/** - * JMX information about the connected SCM servers. - */ -public interface SCMConnectionManagerMXBean { - - List getSCMServers(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java deleted file mode 100644 index 2c01f3a73d0c3..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ /dev/null @@ -1,502 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; - -import com.google.common.base.Preconditions; -import com.google.protobuf.GeneratedMessage; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .InitDatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .RunningDatanodeState; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.ozone.protocol.commands - .DeleteBlockCommandStatus.DeleteBlockCommandStatusBuilder; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import static java.lang.Math.min; -import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.LinkedList; -import java.util.List; -import java.util.Queue; -import java.util.ArrayList; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Consumer; - -/** - * Current Context of State Machine. - */ -public class StateContext { - static final Logger LOG = - LoggerFactory.getLogger(StateContext.class); - private final Queue commandQueue; - private final Map cmdStatusMap; - private final Lock lock; - private final DatanodeStateMachine parent; - private final AtomicLong stateExecutionCount; - private final Configuration conf; - private final List reports; - private final Queue containerActions; - private final Queue pipelineActions; - private DatanodeStateMachine.DatanodeStates state; - private boolean shutdownOnError = false; - - /** - * Starting with a 2 sec heartbeat frequency which will be updated to the - * real HB frequency after scm registration. With this method the - * initial registration could be significant faster. - */ - private AtomicLong heartbeatFrequency = new AtomicLong(2000); - - /** - * Constructs a StateContext. 
- * - * @param conf - Configration - * @param state - State - * @param parent Parent State Machine - */ - public StateContext(Configuration conf, DatanodeStateMachine.DatanodeStates - state, DatanodeStateMachine parent) { - this.conf = conf; - this.state = state; - this.parent = parent; - commandQueue = new LinkedList<>(); - cmdStatusMap = new ConcurrentHashMap<>(); - reports = new LinkedList<>(); - containerActions = new LinkedList<>(); - pipelineActions = new LinkedList<>(); - lock = new ReentrantLock(); - stateExecutionCount = new AtomicLong(0); - } - - /** - * Returns the ContainerStateMachine class that holds this state. - * - * @return ContainerStateMachine. - */ - public DatanodeStateMachine getParent() { - return parent; - } - - /** - * Returns true if we are entering a new state. - * - * @return boolean - */ - boolean isEntering() { - return stateExecutionCount.get() == 0; - } - - /** - * Returns true if we are exiting from the current state. - * - * @param newState - newState. - * @return boolean - */ - boolean isExiting(DatanodeStateMachine.DatanodeStates newState) { - boolean isExiting = state != newState && stateExecutionCount.get() > 0; - if(isExiting) { - stateExecutionCount.set(0); - } - return isExiting; - } - - /** - * Returns the current state the machine is in. - * - * @return state. - */ - public DatanodeStateMachine.DatanodeStates getState() { - return state; - } - - /** - * Sets the current state of the machine. - * - * @param state state. - */ - public void setState(DatanodeStateMachine.DatanodeStates state) { - this.state = state; - } - - /** - * Sets the shutdownOnError. This method needs to be called when we - * set DatanodeState to SHUTDOWN when executing a task of a DatanodeState. - * @param value - */ - private void setShutdownOnError(boolean value) { - this.shutdownOnError = value; - } - - /** - * Get shutdownStateMachine. - * @return boolean - */ - public boolean getShutdownOnError() { - return shutdownOnError; - } - /** - * Adds the report to report queue. - * - * @param report report to be added - */ - public void addReport(GeneratedMessage report) { - if (report != null) { - synchronized (reports) { - reports.add(report); - } - } - } - - /** - * Adds the reports which could not be sent by heartbeat back to the - * reports list. - * - * @param reportsToPutBack list of reports which failed to be sent by - * heartbeat. - */ - public void putBackReports(List reportsToPutBack) { - synchronized (reports) { - reports.addAll(0, reportsToPutBack); - } - } - - /** - * Returns all the available reports from the report queue, or empty list if - * the queue is empty. - * - * @return List of reports - */ - public List getAllAvailableReports() { - return getReports(Integer.MAX_VALUE); - } - - /** - * Returns available reports from the report queue with a max limit on - * list size, or empty list if the queue is empty. - * - * @return List of reports - */ - public List getReports(int maxLimit) { - List reportsToReturn = new LinkedList<>(); - synchronized (reports) { - List tempList = reports.subList( - 0, min(reports.size(), maxLimit)); - reportsToReturn.addAll(tempList); - tempList.clear(); - } - return reportsToReturn; - } - - - /** - * Adds the ContainerAction to ContainerAction queue. 
- * - * @param containerAction ContainerAction to be added - */ - public void addContainerAction(ContainerAction containerAction) { - synchronized (containerActions) { - containerActions.add(containerAction); - } - } - - /** - * Add ContainerAction to ContainerAction queue if it's not present. - * - * @param containerAction ContainerAction to be added - */ - public void addContainerActionIfAbsent(ContainerAction containerAction) { - synchronized (containerActions) { - if (!containerActions.contains(containerAction)) { - containerActions.add(containerAction); - } - } - } - - /** - * Returns all the pending ContainerActions from the ContainerAction queue, - * or empty list if the queue is empty. - * - * @return {@literal List} - */ - public List getAllPendingContainerActions() { - return getPendingContainerAction(Integer.MAX_VALUE); - } - - /** - * Returns pending ContainerActions from the ContainerAction queue with a - * max limit on list size, or empty list if the queue is empty. - * - * @return {@literal List} - */ - public List getPendingContainerAction(int maxLimit) { - List containerActionList = new ArrayList<>(); - synchronized (containerActions) { - if (!containerActions.isEmpty()) { - int size = containerActions.size(); - int limit = size > maxLimit ? maxLimit : size; - for (int count = 0; count < limit; count++) { - // we need to remove the action from the containerAction queue - // as well - ContainerAction action = containerActions.poll(); - Preconditions.checkNotNull(action); - containerActionList.add(action); - } - } - return containerActionList; - } - } - - /** - * Add PipelineAction to PipelineAction queue if it's not present. - * - * @param pipelineAction PipelineAction to be added - */ - public void addPipelineActionIfAbsent(PipelineAction pipelineAction) { - synchronized (pipelineActions) { - /** - * If pipelineAction queue already contains entry for the pipeline id - * with same action, we should just return. - * Note: We should not use pipelineActions.contains(pipelineAction) here - * as, pipelineAction has a msg string. So even if two msgs differ though - * action remains same on the given pipeline, it will end up adding it - * multiple times here. - */ - for (PipelineAction pipelineActionIter : pipelineActions) { - if (pipelineActionIter.getAction() == pipelineAction.getAction() - && pipelineActionIter.hasClosePipeline() && pipelineAction - .hasClosePipeline() - && pipelineActionIter.getClosePipeline().getPipelineID() - .equals(pipelineAction.getClosePipeline().getPipelineID())) { - return; - } - } - pipelineActions.add(pipelineAction); - } - } - - /** - * Returns pending PipelineActions from the PipelineAction queue with a - * max limit on list size, or empty list if the queue is empty. - * - * @return {@literal List} - */ - public List getPendingPipelineAction(int maxLimit) { - List pipelineActionList = new ArrayList<>(); - synchronized (pipelineActions) { - if (!pipelineActions.isEmpty()) { - int size = pipelineActions.size(); - int limit = size > maxLimit ? maxLimit : size; - for (int count = 0; count < limit; count++) { - pipelineActionList.add(pipelineActions.poll()); - } - } - return pipelineActionList; - } - } - - /** - * Returns the next task to get executed by the datanode state machine. 
- * @return A callable that will be executed by the - * {@link DatanodeStateMachine} - */ - @SuppressWarnings("unchecked") - public DatanodeState getTask() { - switch (this.state) { - case INIT: - return new InitDatanodeState(this.conf, parent.getConnectionManager(), - this); - case RUNNING: - return new RunningDatanodeState(this.conf, parent.getConnectionManager(), - this); - case SHUTDOWN: - return null; - default: - throw new IllegalArgumentException("Not Implemented yet."); - } - } - - /** - * Executes the required state function. - * - * @param service - Executor Service - * @param time - seconds to wait - * @param unit - Seconds. - * @throws InterruptedException - * @throws ExecutionException - * @throws TimeoutException - */ - public void execute(ExecutorService service, long time, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - stateExecutionCount.incrementAndGet(); - DatanodeState task = getTask(); - - // Adding not null check, in a case where datanode is still starting up, but - // we called stop DatanodeStateMachine, this sets state to SHUTDOWN, and - // there is a chance of getting task as null. - if (task != null) { - if (this.isEntering()) { - task.onEnter(); - } - task.execute(service); - DatanodeStateMachine.DatanodeStates newState = task.await(time, unit); - if (this.state != newState) { - if (LOG.isDebugEnabled()) { - LOG.debug("Task {} executed, state transited from {} to {}", - task.getClass().getSimpleName(), this.state, newState); - } - if (isExiting(newState)) { - task.onExit(); - } - this.setState(newState); - } - - if (this.state == DatanodeStateMachine.DatanodeStates.SHUTDOWN) { - LOG.error("Critical error occurred in StateMachine, setting " + - "shutDownMachine"); - // When some exception occurred, set shutdownStateMachine to true, so - // that we can terminate the datanode. - setShutdownOnError(true); - } - } - } - - /** - * Returns the next command or null if it is empty. - * - * @return SCMCommand or Null. - */ - public SCMCommand getNextCommand() { - lock.lock(); - try { - return commandQueue.poll(); - } finally { - lock.unlock(); - } - } - - /** - * Adds a command to the State Machine queue. - * - * @param command - SCMCommand. - */ - public void addCommand(SCMCommand command) { - lock.lock(); - try { - commandQueue.add(command); - } finally { - lock.unlock(); - } - this.addCmdStatus(command); - } - - /** - * Returns the count of the Execution. - * @return long - */ - public long getExecutionCount() { - return stateExecutionCount.get(); - } - - /** - * Returns the next {@link CommandStatus} or null if it is empty. - * - * @return {@link CommandStatus} or Null. - */ - public CommandStatus getCmdStatus(Long key) { - return cmdStatusMap.get(key); - } - - /** - * Adds a {@link CommandStatus} to the State Machine. - * - * @param status - {@link CommandStatus}. - */ - public void addCmdStatus(Long key, CommandStatus status) { - cmdStatusMap.put(key, status); - } - - /** - * Adds a {@link CommandStatus} to the State Machine for given SCMCommand. - * - * @param cmd - {@link SCMCommand}. - */ - public void addCmdStatus(SCMCommand cmd) { - if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) { - addCmdStatus(cmd.getId(), - DeleteBlockCommandStatusBuilder.newBuilder() - .setCmdId(cmd.getId()) - .setStatus(Status.PENDING) - .setType(cmd.getType()) - .build()); - } - } - - /** - * Get map holding all {@link CommandStatus} objects. 
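The command queue above is drained by a single consumer (see the command processor thread in DatanodeStateMachine earlier in this patch). A minimal sketch, assuming dispatcher is that machine's CommandDispatcher:

    SCMCommand command = context.getNextCommand();
    if (command != null) {
      dispatcher.handle(command);   // routes the command to its registered handler
    }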
- * - */ - public Map getCommandStatusMap() { - return cmdStatusMap; - } - - /** - * Updates status of a pending status command. - * @param cmdId command id - * @param cmdStatusUpdater Consumer to update command status. - * @return true if command status updated successfully else false. - */ - public boolean updateCommandStatus(Long cmdId, - Consumer cmdStatusUpdater) { - if(cmdStatusMap.containsKey(cmdId)) { - cmdStatusUpdater.accept(cmdStatusMap.get(cmdId)); - return true; - } - return false; - } - - public void configureHeartbeatFrequency(){ - heartbeatFrequency.set(getScmHeartbeatInterval(conf)); - } - - /** - * Return current heartbeat frequency in ms. - */ - public long getHeartbeatFrequency() { - return heartbeatFrequency.get(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java deleted file mode 100644 index 2dec08fe83c4a..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java +++ /dev/null @@ -1,179 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto - .ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.Time; -import org.apache.ratis.protocol.NotLeaderException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handler for close container command received from SCM. - */ -public class CloseContainerCommandHandler implements CommandHandler { - - private static final Logger LOG = - LoggerFactory.getLogger(CloseContainerCommandHandler.class); - - private int invocationCount; - private long totalTime; - - /** - * Constructs a ContainerReport handler. - */ - public CloseContainerCommandHandler() { - } - - /** - * Handles a given SCM command. - * - * @param command - SCM Command - * @param ozoneContainer - Ozone Container. - * @param context - Current Context. - * @param connectionManager - The SCMs that we are talking to. - */ - @Override - public void handle(SCMCommand command, OzoneContainer ozoneContainer, - StateContext context, SCMConnectionManager connectionManager) { - LOG.debug("Processing Close Container command."); - invocationCount++; - final long startTime = Time.monotonicNow(); - final DatanodeDetails datanodeDetails = context.getParent() - .getDatanodeDetails(); - final CloseContainerCommandProto closeCommand = - ((CloseContainerCommand)command).getProto(); - final ContainerController controller = ozoneContainer.getController(); - final long containerId = closeCommand.getContainerID(); - try { - final Container container = controller.getContainer(containerId); - - if (container == null) { - LOG.error("Container #{} does not exist in datanode. 
" - + "Container close failed.", containerId); - return; - } - - // move the container to CLOSING if in OPEN state - controller.markContainerForClose(containerId); - - switch (container.getContainerState()) { - case OPEN: - case CLOSING: - // If the container is part of open pipeline, close it via write channel - if (ozoneContainer.getWriteChannel() - .isExist(closeCommand.getPipelineID())) { - ContainerCommandRequestProto request = - getContainerCommandRequestProto(datanodeDetails, - closeCommand.getContainerID()); - ozoneContainer.getWriteChannel() - .submitRequest(request, closeCommand.getPipelineID()); - } else { - // Container should not exist in CLOSING state without a pipeline - controller.markContainerUnhealthy(containerId); - } - break; - case QUASI_CLOSED: - if (closeCommand.getForce()) { - controller.closeContainer(containerId); - break; - } - case CLOSED: - break; - case UNHEALTHY: - case INVALID: - if (LOG.isDebugEnabled()) { - LOG.debug("Cannot close the container #{}, the container is" - + " in {} state.", containerId, container.getContainerState()); - } - default: - break; - } - } catch (NotLeaderException e) { - LOG.debug("Follower cannot close container #{}.", containerId); - } catch (IOException e) { - LOG.error("Can't close container #{}", containerId, e); - } finally { - long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; - } - } - - private ContainerCommandRequestProto getContainerCommandRequestProto( - final DatanodeDetails datanodeDetails, final long containerId) { - final ContainerCommandRequestProto.Builder command = - ContainerCommandRequestProto.newBuilder(); - command.setCmdType(ContainerProtos.Type.CloseContainer); - command.setTraceID(TracingUtil.exportCurrentSpan()); - command.setContainerID(containerId); - command.setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); - command.setDatanodeUuid(datanodeDetails.getUuidString()); - return command.build(); - } - - /** - * Returns the command type that this command handler handles. - * - * @return Type - */ - @Override - public SCMCommandProto.Type getCommandType() { - return SCMCommandProto.Type.closeContainerCommand; - } - - /** - * Returns number of times this handler has been invoked. - * - * @return int - */ - @Override - public int getInvocationCount() { - return invocationCount; - } - - /** - * Returns the average time this function takes to run. - * - * @return long - */ - @Override - public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java deleted file mode 100644 index af854ec3d61a1..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java +++ /dev/null @@ -1,188 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -/** - * Dispatches command to the correct handler. - */ -public final class CommandDispatcher { - static final Logger LOG = - LoggerFactory.getLogger(CommandDispatcher.class); - private final StateContext context; - private final Map handlerMap; - private final OzoneContainer container; - private final SCMConnectionManager connectionManager; - - /** - * Constructs a command Dispatcher. - * @param context - Context. - */ - /** - * Constructs a command dispatcher. - * - * @param container - Ozone Container - * @param context - Context - * @param handlers - Set of handlers. - */ - private CommandDispatcher(OzoneContainer container, SCMConnectionManager - connectionManager, StateContext context, - CommandHandler... handlers) { - Preconditions.checkNotNull(context); - Preconditions.checkNotNull(handlers); - Preconditions.checkArgument(handlers.length > 0); - Preconditions.checkNotNull(container); - Preconditions.checkNotNull(connectionManager); - this.context = context; - this.container = container; - this.connectionManager = connectionManager; - handlerMap = new HashMap<>(); - for (CommandHandler h : handlers) { - if(handlerMap.containsKey(h.getCommandType())){ - LOG.error("Duplicate handler for the same command. Exiting. Handle " + - "key : { }", h.getCommandType().getDescriptorForType().getName()); - throw new IllegalArgumentException("Duplicate handler for the same " + - "command."); - } - handlerMap.put(h.getCommandType(), h); - } - } - - public CommandHandler getCloseContainerHandler() { - return handlerMap.get(Type.closeContainerCommand); - } - - @VisibleForTesting - public CommandHandler getDeleteBlocksCommandHandler() { - return handlerMap.get(Type.deleteBlocksCommand); - } - - /** - * Dispatch the command to the correct handler. - * - * @param command - SCM Command. - */ - public void handle(SCMCommand command) { - Preconditions.checkNotNull(command); - CommandHandler handler = handlerMap.get(command.getType()); - if (handler != null) { - handler.handle(command, container, context, connectionManager); - } else { - LOG.error("Unknown SCM Command queued. There is no handler for this " + - "command. Command: {}", command.getType().getDescriptorForType() - .getName()); - } - } - - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Helper class to construct command dispatcher. 
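A hedged construction sketch for the builder described below, mirroring the calls made in the DatanodeStateMachine constructor earlier in this patch; container, connectionManager and context are assumed to already exist:

    CommandDispatcher dispatcher = CommandDispatcher.newBuilder()
        .addHandler(new CloseContainerCommandHandler())
        .addHandler(new DeleteContainerCommandHandler())
        .setConnectionManager(connectionManager)
        .setContainer(container)
        .setContext(context)
        .build();
    dispatcher.handle(command);     // each queued SCMCommand is routed by its type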
- */ - public static class Builder { - private final List handlerList; - private OzoneContainer container; - private StateContext context; - private SCMConnectionManager connectionManager; - - public Builder() { - handlerList = new LinkedList<>(); - } - - /** - * Adds a handler. - * - * @param handler - handler - * @return Builder - */ - public Builder addHandler(CommandHandler handler) { - Preconditions.checkNotNull(handler); - handlerList.add(handler); - return this; - } - - /** - * Add the OzoneContainer. - * - * @param ozoneContainer - ozone container. - * @return Builder - */ - public Builder setContainer(OzoneContainer ozoneContainer) { - Preconditions.checkNotNull(ozoneContainer); - this.container = ozoneContainer; - return this; - } - - /** - * Set the Connection Manager. - * - * @param scmConnectionManager - * @return this - */ - public Builder setConnectionManager(SCMConnectionManager - scmConnectionManager) { - Preconditions.checkNotNull(scmConnectionManager); - this.connectionManager = scmConnectionManager; - return this; - } - - /** - * Sets the Context. - * - * @param stateContext - StateContext - * @return this - */ - public Builder setContext(StateContext stateContext) { - Preconditions.checkNotNull(stateContext); - this.context = stateContext; - return this; - } - - /** - * Builds a command Dispatcher. - * @return Command Dispatcher. - */ - public CommandDispatcher build() { - Preconditions.checkNotNull(this.connectionManager, "Missing connection" + - " manager."); - Preconditions.checkNotNull(this.container, "Missing container."); - Preconditions.checkNotNull(this.context, "Missing context."); - Preconditions.checkArgument(this.handlerList.size() > 0); - return new CommandDispatcher(this.container, this.connectionManager, - this.context, handlerList.toArray( - new CommandHandler[handlerList.size()])); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java deleted file mode 100644 index 1ea0ea8451502..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.slf4j.Logger; - -import java.util.function.Consumer; - -/** - * Generic interface for handlers. - */ -public interface CommandHandler { - - /** - * Handles a given SCM command. - * @param command - SCM Command - * @param container - Ozone Container. - * @param context - Current Context. - * @param connectionManager - The SCMs that we are talking to. - */ - void handle(SCMCommand command, OzoneContainer container, - StateContext context, SCMConnectionManager connectionManager); - - /** - * Returns the command type that this command handler handles. - * @return Type - */ - SCMCommandProto.Type getCommandType(); - - /** - * Returns number of times this handler has been invoked. - * @return int - */ - int getInvocationCount(); - - /** - * Returns the average time this function takes to run. - * @return long - */ - long getAverageRunTime(); - - /** - * Default implementation for updating command status. - */ - default void updateCommandStatus(StateContext context, SCMCommand command, - Consumer cmdStatusUpdater, Logger log) { - if (!context.updateCommandStatus(command.getId(), cmdStatusUpdater)) { - log.debug("{} with Id:{} not found.", command.getType(), - command.getId()); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java deleted file mode 100644 index cdecf5d7ed470..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ /dev/null @@ -1,281 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto - .DeleteBlockTransactionResult; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers - .DeletedContainerBlocksSummary; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlockCommandStatus; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.BatchOperation; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; -import java.util.function.Consumer; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_NOT_FOUND; - -/** - * Handle block deletion commands. 
- */ -public class DeleteBlocksCommandHandler implements CommandHandler { - - public static final Logger LOG = - LoggerFactory.getLogger(DeleteBlocksCommandHandler.class); - - private final ContainerSet containerSet; - private final Configuration conf; - private int invocationCount; - private long totalTime; - private boolean cmdExecuted; - - public DeleteBlocksCommandHandler(ContainerSet cset, - Configuration conf) { - this.containerSet = cset; - this.conf = conf; - } - - @Override - public void handle(SCMCommand command, OzoneContainer container, - StateContext context, SCMConnectionManager connectionManager) { - cmdExecuted = false; - long startTime = Time.monotonicNow(); - ContainerBlocksDeletionACKProto blockDeletionACK = null; - try { - if (command.getType() != SCMCommandProto.Type.deleteBlocksCommand) { - LOG.warn("Skipping handling command, expected command " - + "type {} but found {}", - SCMCommandProto.Type.deleteBlocksCommand, command.getType()); - return; - } - LOG.debug("Processing block deletion command."); - invocationCount++; - - // move blocks to deleting state. - // this is a metadata update, the actual deletion happens in another - // recycling thread. - DeleteBlocksCommand cmd = (DeleteBlocksCommand) command; - List containerBlocks = cmd.blocksTobeDeleted(); - - DeletedContainerBlocksSummary summary = - DeletedContainerBlocksSummary.getFrom(containerBlocks); - LOG.info("Start to delete container blocks, TXIDs={}, " - + "numOfContainers={}, numOfBlocks={}", - summary.getTxIDSummary(), - summary.getNumOfContainers(), - summary.getNumOfBlocks()); - - ContainerBlocksDeletionACKProto.Builder resultBuilder = - ContainerBlocksDeletionACKProto.newBuilder(); - containerBlocks.forEach(entry -> { - DeleteBlockTransactionResult.Builder txResultBuilder = - DeleteBlockTransactionResult.newBuilder(); - txResultBuilder.setTxID(entry.getTxID()); - long containerId = entry.getContainerID(); - try { - Container cont = containerSet.getContainer(containerId); - if (cont == null) { - throw new StorageContainerException("Unable to find the container " - + containerId, CONTAINER_NOT_FOUND); - } - ContainerProtos.ContainerType containerType = cont.getContainerType(); - switch (containerType) { - case KeyValueContainer: - KeyValueContainerData containerData = (KeyValueContainerData) - cont.getContainerData(); - cont.writeLock(); - try { - deleteKeyValueContainerBlocks(containerData, entry); - } finally { - cont.writeUnlock(); - } - txResultBuilder.setContainerID(containerId) - .setSuccess(true); - break; - default: - LOG.error( - "Delete Blocks Command Handler is not implemented for " + - "containerType {}", containerType); - } - } catch (IOException e) { - LOG.warn("Failed to delete blocks for container={}, TXID={}", - entry.getContainerID(), entry.getTxID(), e); - txResultBuilder.setContainerID(containerId) - .setSuccess(false); - } - resultBuilder.addResults(txResultBuilder.build()) - .setDnId(context.getParent().getDatanodeDetails() - .getUuid().toString()); - }); - blockDeletionACK = resultBuilder.build(); - - // Send ACK back to SCM as long as meta updated - // TODO Or we should wait until the blocks are actually deleted? 
- if (!containerBlocks.isEmpty()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Sending following block deletion ACK to SCM"); - for (DeleteBlockTransactionResult result : blockDeletionACK - .getResultsList()) { - LOG.debug(result.getTxID() + " : " + result.getSuccess()); - } - } - } - cmdExecuted = true; - } finally { - final ContainerBlocksDeletionACKProto deleteAck = - blockDeletionACK; - Consumer statusUpdater = (cmdStatus) -> { - cmdStatus.setStatus(cmdExecuted); - ((DeleteBlockCommandStatus) cmdStatus).setBlocksDeletionAck(deleteAck); - }; - updateCommandStatus(context, command, statusUpdater, LOG); - long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; - } - } - - /** - * Move a bunch of blocks from a container to deleting state. This is a meta - * update, the actual deletes happen in async mode. - * - * @param containerData - KeyValueContainerData - * @param delTX a block deletion transaction. - * @throws IOException if I/O error occurs. - */ - private void deleteKeyValueContainerBlocks( - KeyValueContainerData containerData, DeletedBlocksTransaction delTX) - throws IOException { - long containerId = delTX.getContainerID(); - if (LOG.isDebugEnabled()) { - LOG.debug("Processing Container : {}, DB path : {}", containerId, - containerData.getMetadataPath()); - } - - if (delTX.getTxID() < containerData.getDeleteTransactionId()) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Ignoring delete blocks for containerId: %d." - + " Outdated delete transactionId %d < %d", containerId, - delTX.getTxID(), containerData.getDeleteTransactionId())); - } - return; - } - - int newDeletionBlocks = 0; - try(ReferenceCountedDB containerDB = - BlockUtils.getDB(containerData, conf)) { - for (Long blk : delTX.getLocalIDList()) { - BatchOperation batch = new BatchOperation(); - byte[] blkBytes = Longs.toByteArray(blk); - byte[] blkInfo = containerDB.getStore().get(blkBytes); - if (blkInfo != null) { - byte[] deletingKeyBytes = - DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk); - byte[] deletedKeyBytes = - DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk); - if (containerDB.getStore().get(deletingKeyBytes) != null - || containerDB.getStore().get(deletedKeyBytes) != null) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format( - "Ignoring delete for block %d in container %d." - + " Entry already added.", blk, containerId)); - } - continue; - } - // Found the block in container db, - // use an atomic update to change its state to deleting. - batch.put(deletingKeyBytes, blkInfo); - batch.delete(blkBytes); - try { - containerDB.getStore().writeBatch(batch); - newDeletionBlocks++; - if (LOG.isDebugEnabled()) { - LOG.debug("Transited Block {} to DELETING state in container {}", - blk, containerId); - } - } catch (IOException e) { - // if some blocks failed to delete, we fail this TX, - // without sending this ACK to SCM, SCM will resend the TX - // with a certain number of retries. 
- throw new IOException( - "Failed to delete blocks for TXID = " + delTX.getTxID(), e); - } - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Block {} not found or already under deletion in" - + " container {}, skip deleting it.", blk, containerId); - } - } - } - - containerDB.getStore() - .put(DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX), - Longs.toByteArray(delTX.getTxID())); - containerData - .updateDeleteTransactionId(delTX.getTxID()); - // update pending deletion blocks count in in-memory container status - containerData.incrPendingDeletionBlocks(newDeletionBlocks); - } - } - - @Override - public SCMCommandProto.Type getCommandType() { - return SCMCommandProto.Type.deleteBlocksCommand; - } - - @Override - public int getInvocationCount() { - return this.invocationCount; - } - - @Override - public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java deleted file mode 100644 index b54fb1a17ac05..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handler to process the DeleteContainerCommand from SCM. - */ -public class DeleteContainerCommandHandler implements CommandHandler { - - private static final Logger LOG = - LoggerFactory.getLogger(DeleteContainerCommandHandler.class); - - private int invocationCount; - private long totalTime; - - @Override - public void handle(final SCMCommand command, - final OzoneContainer ozoneContainer, - final StateContext context, - final SCMConnectionManager connectionManager) { - final long startTime = Time.monotonicNow(); - invocationCount++; - try { - final DeleteContainerCommand deleteContainerCommand = - (DeleteContainerCommand) command; - final ContainerController controller = ozoneContainer.getController(); - controller.deleteContainer(deleteContainerCommand.getContainerID(), - deleteContainerCommand.isForce()); - } catch (IOException e) { - LOG.error("Exception occurred while deleting the container.", e); - } finally { - totalTime += Time.monotonicNow() - startTime; - } - - } - - @Override - public SCMCommandProto.Type getCommandType() { - return SCMCommandProto.Type.deleteContainerCommand; - } - - @Override - public int getInvocationCount() { - return this.invocationCount; - } - - @Override - public long getAverageRunTime() { - return invocationCount == 0 ? 0 : totalTime / invocationCount; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java deleted file mode 100644 index a028041b19675..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; -import org.apache.hadoop.ozone.container.replication.ReplicationTask; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Command handler to copy containers from sources. - */ -public class ReplicateContainerCommandHandler implements CommandHandler { - - static final Logger LOG = - LoggerFactory.getLogger(ReplicateContainerCommandHandler.class); - - private int invocationCount; - - private long totalTime; - - private Configuration conf; - - private ReplicationSupervisor supervisor; - - public ReplicateContainerCommandHandler( - Configuration conf, - ReplicationSupervisor supervisor) { - this.conf = conf; - this.supervisor = supervisor; - } - - @Override - public void handle(SCMCommand command, OzoneContainer container, - StateContext context, SCMConnectionManager connectionManager) { - - final ReplicateContainerCommand replicateCommand = - (ReplicateContainerCommand) command; - final List sourceDatanodes = - replicateCommand.getSourceDatanodes(); - final long containerID = replicateCommand.getContainerID(); - - Preconditions.checkArgument(sourceDatanodes.size() > 0, - String.format("Replication command is received for container %d " - + "but the size of source datanodes was 0.", containerID)); - - supervisor.addTask(new ReplicationTask(containerID, sourceDatanodes)); - } - - @Override - public SCMCommandProto.Type getCommandType() { - return Type.replicateContainerCommand; - } - - @Override - public int getInvocationCount() { - return this.invocationCount; - } - - @Override - public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java deleted file mode 100644 index 1e9c8dc5eeedb..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java deleted file mode 100644 index feb2f812ac844..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; -/** - - State machine class is used by the container to denote various states a - container can be in and also is used for command processing. - - Container has the following states. - - Start - > getVersion -> Register -> Running -> Shutdown - - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java deleted file mode 100644 index 25be207dcd9c8..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.states; - -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * State Interface that allows tasks to maintain states. - */ -public interface DatanodeState { - /** - * Called before entering this state. - */ - void onEnter(); - - /** - * Called After exiting this state. - */ - void onExit(); - - /** - * Executes one or more tasks that is needed by this state. - * - * @param executor - ExecutorService - */ - void execute(ExecutorService executor); - - /** - * Wait for execute to finish. - * - * @param time - Time - * @param timeUnit - Unit of time. - * @throws InterruptedException - * @throws ExecutionException - * @throws TimeoutException - */ - T await(long time, TimeUnit timeUnit) - throws InterruptedException, ExecutionException, TimeoutException; - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java deleted file mode 100644 index 273886228f60e..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.datanode; - -import com.google.common.base.Strings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses; - -/** - * Init Datanode State is the task that gets run when we are in Init State. - */ -public class InitDatanodeState implements DatanodeState, - Callable { - static final Logger LOG = LoggerFactory.getLogger(InitDatanodeState.class); - private final SCMConnectionManager connectionManager; - private final Configuration conf; - private final StateContext context; - private Future result; - - /** - * Create InitDatanodeState Task. - * - * @param conf - Conf - * @param connectionManager - Connection Manager - * @param context - Current Context - */ - public InitDatanodeState(Configuration conf, - SCMConnectionManager connectionManager, - StateContext context) { - this.conf = conf; - this.connectionManager = connectionManager; - this.context = context; - } - - /** - * Computes a result, or throws an exception if unable to do so. - * - * @return computed result - * @throws Exception if unable to compute a result - */ - @Override - public DatanodeStateMachine.DatanodeStates call() throws Exception { - Collection addresses = null; - try { - addresses = getSCMAddresses(conf); - } catch (IllegalArgumentException e) { - if(!Strings.isNullOrEmpty(e.getMessage())) { - LOG.error("Failed to get SCM addresses: " + e.getMessage()); - } - return DatanodeStateMachine.DatanodeStates.SHUTDOWN; - } - - if (addresses == null || addresses.isEmpty()) { - LOG.error("Null or empty SCM address list found."); - return DatanodeStateMachine.DatanodeStates.SHUTDOWN; - } else { - for (InetSocketAddress addr : addresses) { - if (addr.isUnresolved()) { - LOG.warn("One SCM address ({}) can't (yet?) be resolved. Postpone " - + "initialization.", addr); - - //skip any further initialization. DatanodeStateMachine will try it - // again after the hb frequency - return this.context.getState(); - } - } - for (InetSocketAddress addr : addresses) { - connectionManager.addSCMServer(addr); - } - } - - // If datanode ID is set, persist it to the ID file. 
- persistContainerDatanodeDetails(); - - return this.context.getState().getNextState(); - } - - /** - * Persist DatanodeDetails to datanode.id file. - */ - private void persistContainerDatanodeDetails() { - String dataNodeIDPath = HddsServerUtil.getDatanodeIdFilePath(conf); - if (Strings.isNullOrEmpty(dataNodeIDPath)) { - LOG.error("A valid path is needed for config setting {}", - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR); - this.context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN); - return; - } - File idPath = new File(dataNodeIDPath); - DatanodeDetails datanodeDetails = this.context.getParent() - .getDatanodeDetails(); - if (datanodeDetails != null && !idPath.exists()) { - try { - ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath); - } catch (IOException ex) { - // As writing DatanodeDetails in to datanodeid file failed, which is - // a critical thing, so shutting down the state machine. - LOG.error("Writing to {} failed {}", dataNodeIDPath, ex.getMessage()); - this.context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN); - return; - } - LOG.info("DatanodeDetails is persisted to {}", dataNodeIDPath); - } - } - - /** - * Called before entering this state. - */ - @Override - public void onEnter() { - LOG.trace("Entering init container state"); - } - - /** - * Called After exiting this state. - */ - @Override - public void onExit() { - LOG.trace("Exiting init container state"); - } - - /** - * Executes one or more tasks that is needed by this state. - * - * @param executor - ExecutorService - */ - @Override - public void execute(ExecutorService executor) { - result = executor.submit(this); - } - - /** - * Wait for execute to finish. - * - * @param time - Time - * @param timeUnit - Unit of time. - */ - @Override - public DatanodeStateMachine.DatanodeStates await(long time, - TimeUnit timeUnit) throws InterruptedException, - ExecutionException, TimeoutException { - return result.get(time, timeUnit); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java deleted file mode 100644 index 6b596fe14f4c3..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java +++ /dev/null @@ -1,187 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.datanode; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.endpoint.HeartbeatEndpointTask; -import org.apache.hadoop.ozone.container.common.states.endpoint.RegisterEndpointTask; -import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorCompletionService; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Class that implements handshake with SCM. - */ -public class RunningDatanodeState implements DatanodeState { - static final Logger - LOG = LoggerFactory.getLogger(RunningDatanodeState.class); - private final SCMConnectionManager connectionManager; - private final Configuration conf; - private final StateContext context; - private CompletionService ecs; - - public RunningDatanodeState(Configuration conf, - SCMConnectionManager connectionManager, - StateContext context) { - this.connectionManager = connectionManager; - this.conf = conf; - this.context = context; - } - - /** - * Called before entering this state. - */ - @Override - public void onEnter() { - LOG.trace("Entering handshake task."); - } - - /** - * Called After exiting this state. - */ - @Override - public void onExit() { - LOG.trace("Exiting handshake task."); - } - - /** - * Executes one or more tasks that is needed by this state. - * - * @param executor - ExecutorService - */ - @Override - public void execute(ExecutorService executor) { - ecs = new ExecutorCompletionService<>(executor); - for (EndpointStateMachine endpoint : connectionManager.getValues()) { - Callable endpointTask - = getEndPointTask(endpoint); - if (endpointTask != null) { - ecs.submit(endpointTask); - } else { - // This can happen if a task is taking more time than the timeOut - // specified for the task in await, and when it is completed the task - // has set the state to Shutdown, we may see the state as shutdown - // here. So, we need to Shutdown DatanodeStateMachine. - LOG.error("State is Shutdown in RunningDatanodeState"); - context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN); - } - } - } - //TODO : Cache some of these tasks instead of creating them - //all the time. 
- private Callable - getEndPointTask(EndpointStateMachine endpoint) { - switch (endpoint.getState()) { - case GETVERSION: - return new VersionEndpointTask(endpoint, conf, context.getParent() - .getContainer()); - case REGISTER: - return RegisterEndpointTask.newBuilder() - .setConfig(conf) - .setEndpointStateMachine(endpoint) - .setContext(context) - .setDatanodeDetails(context.getParent().getDatanodeDetails()) - .setOzoneContainer(context.getParent().getContainer()) - .build(); - case HEARTBEAT: - return HeartbeatEndpointTask.newBuilder() - .setConfig(conf) - .setEndpointStateMachine(endpoint) - .setDatanodeDetails(context.getParent().getDatanodeDetails()) - .setContext(context) - .build(); - case SHUTDOWN: - break; - default: - throw new IllegalArgumentException("Illegal Argument."); - } - return null; - } - - /** - * Computes the next state the container state machine must move to by looking - * at all the state of endpoints. - *
- * if any endpoint state has moved to Shutdown, either we have an - * unrecoverable error or we have been told to shutdown. Either case the - * datanode state machine should move to Shutdown state, otherwise we - * remain in the Running state. - * - * @return next container state. - */ - private DatanodeStateMachine.DatanodeStates - computeNextContainerState( - List> results) { - for (Future state : results) { - try { - if (state.get() == EndpointStateMachine.EndPointStates.SHUTDOWN) { - // if any endpoint tells us to shutdown we move to shutdown state. - return DatanodeStateMachine.DatanodeStates.SHUTDOWN; - } - } catch (InterruptedException | ExecutionException e) { - LOG.error("Error in executing end point task.", e); - } - } - return DatanodeStateMachine.DatanodeStates.RUNNING; - } - - /** - * Wait for execute to finish. - * - * @param duration - Time - * @param timeUnit - Unit of duration. - */ - @Override - public DatanodeStateMachine.DatanodeStates - await(long duration, TimeUnit timeUnit) - throws InterruptedException, ExecutionException, TimeoutException { - int count = connectionManager.getValues().size(); - int returned = 0; - long timeLeft = timeUnit.toMillis(duration); - long startTime = Time.monotonicNow(); - List> results = new - LinkedList<>(); - - while (returned < count && timeLeft > 0) { - Future result = - ecs.poll(timeLeft, TimeUnit.MILLISECONDS); - if (result != null) { - results.add(result); - returned++; - } - timeLeft = timeLeft - (Time.monotonicNow() - startTime); - } - return computeNextContainerState(results); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java deleted file mode 100644 index 6b8d16c6d39a1..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.datanode; -/** - This package contians files that guide the state transitions from - Init->Running->Shutdown for the datanode. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java deleted file mode 100644 index c50f4573d07f1..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ /dev/null @@ -1,402 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.states.endpoint; - -import com.google.common.base.Preconditions; -import com.google.protobuf.Descriptors; -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineActionsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerActionsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.ozone.container.common.helpers - .DeletedContainerBlocksSummary; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine.EndPointStates; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; -import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.time.ZonedDateTime; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.Callable; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_CONTAINER_ACTION_MAX_LIMIT; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_PIPELINE_ACTION_MAX_LIMIT; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT; - -/** - * Heartbeat class for SCMs. - */ -public class HeartbeatEndpointTask - implements Callable { - static final Logger LOG = - LoggerFactory.getLogger(HeartbeatEndpointTask.class); - private final EndpointStateMachine rpcEndpoint; - private final Configuration conf; - private DatanodeDetailsProto datanodeDetailsProto; - private StateContext context; - private int maxContainerActionsPerHB; - private int maxPipelineActionsPerHB; - - /** - * Constructs a SCM heart beat. - * - * @param conf Config. - */ - public HeartbeatEndpointTask(EndpointStateMachine rpcEndpoint, - Configuration conf, StateContext context) { - this.rpcEndpoint = rpcEndpoint; - this.conf = conf; - this.context = context; - this.maxContainerActionsPerHB = conf.getInt(HDDS_CONTAINER_ACTION_MAX_LIMIT, - HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT); - this.maxPipelineActionsPerHB = conf.getInt(HDDS_PIPELINE_ACTION_MAX_LIMIT, - HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT); - } - - /** - * Get the container Node ID proto. 
- * - * @return ContainerNodeIDProto - */ - public DatanodeDetailsProto getDatanodeDetailsProto() { - return datanodeDetailsProto; - } - - /** - * Set container node ID proto. - * - * @param datanodeDetailsProto - the node id. - */ - public void setDatanodeDetailsProto(DatanodeDetailsProto - datanodeDetailsProto) { - this.datanodeDetailsProto = datanodeDetailsProto; - } - - /** - * Computes a result, or throws an exception if unable to do so. - * - * @return computed result - * @throws Exception if unable to compute a result - */ - @Override - public EndpointStateMachine.EndPointStates call() throws Exception { - rpcEndpoint.lock(); - SCMHeartbeatRequestProto.Builder requestBuilder = null; - try { - Preconditions.checkState(this.datanodeDetailsProto != null); - - requestBuilder = SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetailsProto); - addReports(requestBuilder); - addContainerActions(requestBuilder); - addPipelineActions(requestBuilder); - SCMHeartbeatRequestProto request = requestBuilder.build(); - if (LOG.isDebugEnabled()) { - LOG.debug("Sending heartbeat message :: {}", request.toString()); - } - SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint() - .sendHeartbeat(request); - processResponse(reponse, datanodeDetailsProto); - rpcEndpoint.setLastSuccessfulHeartbeat(ZonedDateTime.now()); - rpcEndpoint.zeroMissedCount(); - } catch (IOException ex) { - // put back the reports which failed to be sent - if (requestBuilder != null) { - putBackReports(requestBuilder); - } - rpcEndpoint.logIfNeeded(ex); - } finally { - rpcEndpoint.unlock(); - } - return rpcEndpoint.getState(); - } - - // TODO: Make it generic. - private void putBackReports(SCMHeartbeatRequestProto.Builder requestBuilder) { - List reports = new LinkedList<>(); - if (requestBuilder.hasContainerReport()) { - reports.add(requestBuilder.getContainerReport()); - } - if (requestBuilder.hasNodeReport()) { - reports.add(requestBuilder.getNodeReport()); - } - if (requestBuilder.getCommandStatusReportsCount() != 0) { - reports.addAll(requestBuilder.getCommandStatusReportsList()); - } - if (requestBuilder.getIncrementalContainerReportCount() != 0) { - reports.addAll(requestBuilder.getIncrementalContainerReportList()); - } - context.putBackReports(reports); - } - - /** - * Adds all the available reports to heartbeat. - * - * @param requestBuilder builder to which the report has to be added. - */ - private void addReports(SCMHeartbeatRequestProto.Builder requestBuilder) { - for (GeneratedMessage report : context.getAllAvailableReports()) { - String reportName = report.getDescriptorForType().getFullName(); - for (Descriptors.FieldDescriptor descriptor : - SCMHeartbeatRequestProto.getDescriptor().getFields()) { - String heartbeatFieldName = descriptor.getMessageType().getFullName(); - if (heartbeatFieldName.equals(reportName)) { - if (descriptor.isRepeated()) { - requestBuilder.addRepeatedField(descriptor, report); - } else { - requestBuilder.setField(descriptor, report); - } - } - } - } - } - - /** - * Adds all the pending ContainerActions to the heartbeat. - * - * @param requestBuilder builder to which the report has to be added. 
- */ - private void addContainerActions( - SCMHeartbeatRequestProto.Builder requestBuilder) { - List actions = context.getPendingContainerAction( - maxContainerActionsPerHB); - if (!actions.isEmpty()) { - ContainerActionsProto cap = ContainerActionsProto.newBuilder() - .addAllContainerActions(actions) - .build(); - requestBuilder.setContainerActions(cap); - } - } - - /** - * Adds all the pending PipelineActions to the heartbeat. - * - * @param requestBuilder builder to which the report has to be added. - */ - private void addPipelineActions( - SCMHeartbeatRequestProto.Builder requestBuilder) { - List actions = context.getPendingPipelineAction( - maxPipelineActionsPerHB); - if (!actions.isEmpty()) { - PipelineActionsProto pap = PipelineActionsProto.newBuilder() - .addAllPipelineActions(actions) - .build(); - requestBuilder.setPipelineActions(pap); - } - } - - /** - * Returns a builder class for HeartbeatEndpointTask task. - * @return Builder. - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Add this command to command processing Queue. - * - * @param response - SCMHeartbeat response. - */ - private void processResponse(SCMHeartbeatResponseProto response, - final DatanodeDetailsProto datanodeDetails) { - Preconditions.checkState(response.getDatanodeUUID() - .equalsIgnoreCase(datanodeDetails.getUuid()), - "Unexpected datanode ID in the response."); - // Verify the response is indeed for this datanode. - for (SCMCommandProto commandResponseProto : response - .getCommandsList()) { - switch (commandResponseProto.getCommandType()) { - case reregisterCommand: - if (rpcEndpoint.getState() == EndPointStates.HEARTBEAT) { - if (LOG.isDebugEnabled()) { - LOG.debug("Received SCM notification to register." - + " Interrupt HEARTBEAT and transit to REGISTER state."); - } - rpcEndpoint.setState(EndPointStates.REGISTER); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Illegal state {} found, expecting {}.", - rpcEndpoint.getState().name(), EndPointStates.HEARTBEAT); - } - } - break; - case deleteBlocksCommand: - DeleteBlocksCommand db = DeleteBlocksCommand - .getFromProtobuf( - commandResponseProto.getDeleteBlocksCommandProto()); - if (!db.blocksTobeDeleted().isEmpty()) { - if (LOG.isDebugEnabled()) { - LOG.debug(DeletedContainerBlocksSummary - .getFrom(db.blocksTobeDeleted()) - .toString()); - } - this.context.addCommand(db); - } - break; - case closeContainerCommand: - CloseContainerCommand closeContainer = - CloseContainerCommand.getFromProtobuf( - commandResponseProto.getCloseContainerCommandProto()); - if (LOG.isDebugEnabled()) { - LOG.debug("Received SCM container close request for container {}", - closeContainer.getContainerID()); - } - this.context.addCommand(closeContainer); - break; - case replicateContainerCommand: - ReplicateContainerCommand replicateContainerCommand = - ReplicateContainerCommand.getFromProtobuf( - commandResponseProto.getReplicateContainerCommandProto()); - if (LOG.isDebugEnabled()) { - LOG.debug("Received SCM container replicate request for container {}", - replicateContainerCommand.getContainerID()); - } - this.context.addCommand(replicateContainerCommand); - break; - case deleteContainerCommand: - DeleteContainerCommand deleteContainerCommand = - DeleteContainerCommand.getFromProtobuf( - commandResponseProto.getDeleteContainerCommandProto()); - if (LOG.isDebugEnabled()) { - LOG.debug("Received SCM delete container request for container {}", - deleteContainerCommand.getContainerID()); - } - 
this.context.addCommand(deleteContainerCommand); - break; - default: - throw new IllegalArgumentException("Unknown response : " - + commandResponseProto.getCommandType().name()); - } - } - } - - /** - * Builder class for HeartbeatEndpointTask. - */ - public static class Builder { - private EndpointStateMachine endPointStateMachine; - private Configuration conf; - private DatanodeDetails datanodeDetails; - private StateContext context; - - /** - * Constructs the builder class. - */ - public Builder() { - } - - /** - * Sets the endpoint state machine. - * - * @param rpcEndPoint - Endpoint state machine. - * @return Builder - */ - public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) { - this.endPointStateMachine = rpcEndPoint; - return this; - } - - /** - * Sets the Config. - * - * @param config - config - * @return Builder - */ - public Builder setConfig(Configuration config) { - this.conf = config; - return this; - } - - /** - * Sets the NodeID. - * - * @param dnDetails - NodeID proto - * @return Builder - */ - public Builder setDatanodeDetails(DatanodeDetails dnDetails) { - this.datanodeDetails = dnDetails; - return this; - } - - /** - * Sets the context. - * @param stateContext - State context. - * @return this. - */ - public Builder setContext(StateContext stateContext) { - this.context = stateContext; - return this; - } - - public HeartbeatEndpointTask build() { - if (endPointStateMachine == null) { - LOG.error("No endpoint specified."); - throw new IllegalArgumentException("A valid endpoint state machine is" + - " needed to construct HeartbeatEndpointTask task"); - } - - if (conf == null) { - LOG.error("No config specified."); - throw new IllegalArgumentException("A valid configration is needed to" + - " construct HeartbeatEndpointTask task"); - } - - if (datanodeDetails == null) { - LOG.error("No datanode specified."); - throw new IllegalArgumentException("A vaild Node ID is needed to " + - "construct HeartbeatEndpointTask task"); - } - - HeartbeatEndpointTask task = new HeartbeatEndpointTask(this - .endPointStateMachine, this.conf, this.context); - task.setDatanodeDetailsProto(datanodeDetails.getProtoBufMessage()); - return task; - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java deleted file mode 100644 index b94b1cfc85d8a..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ /dev/null @@ -1,261 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.endpoint; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.Future; - -/** - * Register a datanode with SCM. - */ -public final class RegisterEndpointTask implements - Callable { - static final Logger LOG = LoggerFactory.getLogger(RegisterEndpointTask.class); - - private final EndpointStateMachine rpcEndPoint; - private final Configuration conf; - private Future result; - private DatanodeDetails datanodeDetails; - private final OzoneContainer datanodeContainerManager; - private StateContext stateContext; - - /** - * Creates a register endpoint task. - * - * @param rpcEndPoint - endpoint - * @param conf - conf - * @param ozoneContainer - container - */ - @VisibleForTesting - public RegisterEndpointTask(EndpointStateMachine rpcEndPoint, - Configuration conf, OzoneContainer ozoneContainer, - StateContext context) { - this.rpcEndPoint = rpcEndPoint; - this.conf = conf; - this.datanodeContainerManager = ozoneContainer; - this.stateContext = context; - - } - - /** - * Get the DatanodeDetails. - * - * @return DatanodeDetailsProto - */ - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - - /** - * Set the contiainerNodeID Proto. - * - * @param datanodeDetails - Container Node ID. - */ - public void setDatanodeDetails( - DatanodeDetails datanodeDetails) { - this.datanodeDetails = datanodeDetails; - } - - /** - * Computes a result, or throws an exception if unable to do so. 
- * - * @return computed result - * @throws Exception if unable to compute a result - */ - @Override - public EndpointStateMachine.EndPointStates call() throws Exception { - - if (getDatanodeDetails() == null) { - LOG.error("DatanodeDetails cannot be null in RegisterEndpoint task, " + - "shutting down the endpoint."); - return rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN); - } - - rpcEndPoint.lock(); - try { - - ContainerReportsProto containerReport = datanodeContainerManager - .getController().getContainerReport(); - NodeReportProto nodeReport = datanodeContainerManager.getNodeReport(); - PipelineReportsProto pipelineReportsProto = - datanodeContainerManager.getPipelineReport(); - // TODO : Add responses to the command Queue. - SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint() - .register(datanodeDetails.getProtoBufMessage(), nodeReport, - containerReport, pipelineReportsProto); - Preconditions.checkState(UUID.fromString(response.getDatanodeUUID()) - .equals(datanodeDetails.getUuid()), - "Unexpected datanode ID in the response."); - Preconditions.checkState(!StringUtils.isBlank(response.getClusterID()), - "Invalid cluster ID in the response."); - if (response.hasHostname() && response.hasIpAddress()) { - datanodeDetails.setHostName(response.getHostname()); - datanodeDetails.setIpAddress(response.getIpAddress()); - } - if (response.hasNetworkName() && response.hasNetworkLocation()) { - datanodeDetails.setNetworkName(response.getNetworkName()); - datanodeDetails.setNetworkLocation(response.getNetworkLocation()); - } - EndpointStateMachine.EndPointStates nextState = - rpcEndPoint.getState().getNextState(); - rpcEndPoint.setState(nextState); - rpcEndPoint.zeroMissedCount(); - this.stateContext.configureHeartbeatFrequency(); - } catch (IOException ex) { - rpcEndPoint.logIfNeeded(ex); - } finally { - rpcEndPoint.unlock(); - } - - return rpcEndPoint.getState(); - } - - /** - * Returns a builder class for RegisterEndPoint task. - * - * @return Builder. - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Builder class for RegisterEndPoint task. - */ - public static class Builder { - private EndpointStateMachine endPointStateMachine; - private Configuration conf; - private DatanodeDetails datanodeDetails; - private OzoneContainer container; - private StateContext context; - - /** - * Constructs the builder class. - */ - public Builder() { - } - - /** - * Sets the endpoint state machine. - * - * @param rpcEndPoint - Endpoint state machine. - * @return Builder - */ - public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) { - this.endPointStateMachine = rpcEndPoint; - return this; - } - - /** - * Sets the Config. - * - * @param config - config - * @return Builder. - */ - public Builder setConfig(Configuration config) { - this.conf = config; - return this; - } - - /** - * Sets the NodeID. - * - * @param dnDetails - NodeID proto - * @return Builder - */ - public Builder setDatanodeDetails(DatanodeDetails dnDetails) { - this.datanodeDetails = dnDetails; - return this; - } - - /** - * Sets the ozonecontainer. 
- * @param ozoneContainer - * @return Builder - */ - public Builder setOzoneContainer(OzoneContainer ozoneContainer) { - this.container = ozoneContainer; - return this; - } - - public Builder setContext(StateContext stateContext) { - this.context = stateContext; - return this; - } - - public RegisterEndpointTask build() { - if (endPointStateMachine == null) { - LOG.error("No endpoint specified."); - throw new IllegalArgumentException("A valid endpoint state machine is" + - " needed to construct RegisterEndPoint task"); - } - - if (conf == null) { - LOG.error("No config specified."); - throw new IllegalArgumentException( - "A valid configuration is needed to construct RegisterEndpoint " - + "task"); - } - - if (datanodeDetails == null) { - LOG.error("No datanode specified."); - throw new IllegalArgumentException("A vaild Node ID is needed to " + - "construct RegisterEndpoint task"); - } - - if (container == null) { - LOG.error("Container is not specified"); - throw new IllegalArgumentException("Container is not specified to " + - "construct RegisterEndpoint task"); - } - - if (context == null) { - LOG.error("StateContext is not specified"); - throw new IllegalArgumentException("Container is not specified to " + - "construct RegisterEndpoint task"); - } - - RegisterEndpointTask task = new RegisterEndpointTask(this - .endPointStateMachine, this.conf, this.container, this.context); - task.setDatanodeDetails(datanodeDetails); - return task; - } - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java deleted file mode 100644 index 04eaa05f44c01..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.endpoint; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.Callable; - -/** - * Task that returns version. - */ -public class VersionEndpointTask implements - Callable { - public static final Logger LOG = LoggerFactory.getLogger(VersionEndpointTask - .class); - private final EndpointStateMachine rpcEndPoint; - private final Configuration configuration; - private final OzoneContainer ozoneContainer; - - public VersionEndpointTask(EndpointStateMachine rpcEndPoint, - Configuration conf, OzoneContainer container) { - this.rpcEndPoint = rpcEndPoint; - this.configuration = conf; - this.ozoneContainer = container; - } - - /** - * Computes a result, or throws an exception if unable to do so. 
- * - * @return computed result - * @throws Exception if unable to compute a result - */ - @Override - public EndpointStateMachine.EndPointStates call() throws Exception { - rpcEndPoint.lock(); - try{ - if (rpcEndPoint.getState().equals( - EndpointStateMachine.EndPointStates.GETVERSION)) { - SCMVersionResponseProto versionResponse = - rpcEndPoint.getEndPoint().getVersion(null); - VersionResponse response = VersionResponse.getFromProtobuf( - versionResponse); - rpcEndPoint.setVersion(response); - - String scmId = response.getValue(OzoneConsts.SCM_ID); - String clusterId = response.getValue(OzoneConsts.CLUSTER_ID); - - // Check volumes - VolumeSet volumeSet = ozoneContainer.getVolumeSet(); - volumeSet.writeLock(); - try { - Map volumeMap = volumeSet.getVolumeMap(); - - Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " + - "null"); - Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " + - "cannot be null"); - - // If version file does not exist - // create version file and also set scmId - - for (Map.Entry entry : volumeMap.entrySet()) { - HddsVolume hddsVolume = entry.getValue(); - boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId, - clusterId, LOG); - if (!result) { - volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath()); - } - } - if (volumeSet.getVolumesList().size() == 0) { - // All volumes are in inconsistent state - throw new DiskOutOfSpaceException("All configured Volumes are in " + - "Inconsistent State"); - } - } finally { - volumeSet.writeUnlock(); - } - - // Start the container services after getting the version information - ozoneContainer.start(scmId); - - EndpointStateMachine.EndPointStates nextState = - rpcEndPoint.getState().getNextState(); - rpcEndPoint.setState(nextState); - rpcEndPoint.zeroMissedCount(); - } else { - LOG.debug("Cannot execute GetVersion task as endpoint state machine " + - "is in {} state", rpcEndPoint.getState()); - } - } catch (DiskOutOfSpaceException ex) { - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN); - } catch(IOException ex) { - rpcEndPoint.logIfNeeded(ex); - } finally { - rpcEndPoint.unlock(); - } - return rpcEndPoint.getState(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java deleted file mode 100644 index 112259834dd22..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.endpoint; -/** - This package contains code for RPC endpoints transitions. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java deleted file mode 100644 index 92c953ff4109e..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.states; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java deleted file mode 100644 index dc5f5bc8547c5..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.transport.server; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto - .XceiverClientProtocolServiceGrpc; -import org.apache.hadoop.hdds.security.token.TokenVerifier; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Grpc Service for handling Container Commands on datanode. - */ -public class GrpcXceiverService extends - XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceImplBase { - public static final Logger - LOG = LoggerFactory.getLogger(GrpcXceiverService.class); - - private final ContainerDispatcher dispatcher; - private final boolean isGrpcTokenEnabled; - private final TokenVerifier tokenVerifier; - - public GrpcXceiverService(ContainerDispatcher dispatcher) { - this(dispatcher, false, null); - } - - public GrpcXceiverService(ContainerDispatcher dispatcher, - boolean grpcTokenEnabled, TokenVerifier tokenVerifier) { - this.dispatcher = dispatcher; - this.isGrpcTokenEnabled = grpcTokenEnabled; - this.tokenVerifier = tokenVerifier; - } - - @Override - public StreamObserver send( - StreamObserver responseObserver) { - return new StreamObserver() { - private final AtomicBoolean isClosed = new AtomicBoolean(false); - - @Override - public void onNext(ContainerCommandRequestProto request) { - try { - if(isGrpcTokenEnabled) { - // ServerInterceptors intercepts incoming request and creates ugi. - tokenVerifier.verify(UserGroupInformation.getCurrentUser() - .getShortUserName(), request.getEncodedToken()); - } - ContainerCommandResponseProto resp = - dispatcher.dispatch(request, null); - responseObserver.onNext(resp); - } catch (Throwable e) { - LOG.error("{} got exception when processing" - + " ContainerCommandRequestProto {}: {}", request, e); - responseObserver.onError(e); - } - } - - @Override - public void onError(Throwable t) { - // for now we just log a msg - LOG.error("{}: ContainerCommand send on error. Exception: {}", t); - } - - @Override - public void onCompleted() { - if (isClosed.compareAndSet(false, true)) { - LOG.debug("{}: ContainerCommand send completed"); - responseObserver.onCompleted(); - } - } - }; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ServerCredentialInterceptor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ServerCredentialInterceptor.java deleted file mode 100644 index 968f0c8071115..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ServerCredentialInterceptor.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  
You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.token.TokenVerifier; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.thirdparty.io.grpc.Context; -import org.apache.ratis.thirdparty.io.grpc.Contexts; -import org.apache.ratis.thirdparty.io.grpc.Metadata; -import org.apache.ratis.thirdparty.io.grpc.ServerCall; -import org.apache.ratis.thirdparty.io.grpc.ServerCallHandler; -import org.apache.ratis.thirdparty.io.grpc.ServerInterceptor; -import org.apache.ratis.thirdparty.io.grpc.Status; - -import static org.apache.hadoop.ozone.OzoneConsts.OBT_METADATA_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.USER_METADATA_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.UGI_CTX_KEY; -/** - * Grpc Server Interceptor for Ozone Block token. - */ -public class ServerCredentialInterceptor implements ServerInterceptor { - - - private static final ServerCall.Listener NOOP_LISTENER = - new ServerCall.Listener() { - }; - - private final TokenVerifier verifier; - - ServerCredentialInterceptor(TokenVerifier verifier) { - this.verifier = verifier; - } - - @Override - public ServerCall.Listener interceptCall( - ServerCall call, Metadata headers, - ServerCallHandler next) { - String token = headers.get(OBT_METADATA_KEY); - String user = headers.get(USER_METADATA_KEY); - Context ctx = Context.current(); - try { - UserGroupInformation ugi = verifier.verify(user, token); - if (ugi == null) { - call.close(Status.UNAUTHENTICATED.withDescription("Missing Block " + - "Token from headers when block token is required."), headers); - return NOOP_LISTENER; - } else { - ctx = ctx.withValue(UGI_CTX_KEY, ugi); - } - } catch (SCMSecurityException e) { - call.close(Status.UNAUTHENTICATED.withDescription(e.getMessage()) - .withCause(e), headers); - return NOOP_LISTENER; - } - return Contexts.interceptCall(ctx, call, headers, next); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java deleted file mode 100644 index c6b0d9238bcc6..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.token.BlockTokenVerifier; -import org.apache.hadoop.hdds.security.token.TokenVerifier; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.MISSING_BLOCK_TOKEN; - -/** - * A server endpoint that acts as the communication layer for Ozone containers. - */ -public abstract class XceiverServer implements XceiverServerSpi { - - private final SecurityConfig secConfig; - private final TokenVerifier tokenVerifier; - private final CertificateClient caClient; - - public XceiverServer(Configuration conf, CertificateClient client) { - Preconditions.checkNotNull(conf); - this.secConfig = new SecurityConfig(conf); - this.caClient = client; - tokenVerifier = new BlockTokenVerifier(secConfig, getCaClient()); - } - - /** - * Default implementation which just validates security token if security is - * enabled. - * - * @param request ContainerCommandRequest - */ - @Override - public void submitRequest(ContainerCommandRequestProto request, - HddsProtos.PipelineID pipelineID) throws IOException { - if (secConfig.isSecurityEnabled()) { - String encodedToken = request.getEncodedToken(); - if (encodedToken == null) { - throw new SCMSecurityException("Security is enabled but client " + - "request is missing block token.", MISSING_BLOCK_TOKEN); - } - tokenVerifier.verify(encodedToken, encodedToken); - } - } - - @VisibleForTesting - protected CertificateClient getCaClient() { - return caClient; - } - - protected SecurityConfig getSecurityConfig() { - return secConfig; - } - - protected TokenVerifier getBlockTokenVerifier() { - return tokenVerifier; - } - - public SecurityConfig getSecConfig() { - return secConfig; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java deleted file mode 100644 index bb352ea5165c6..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.container.common.helpers. - StorageContainerException; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.tracing.GrpcServerInterceptor; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; - -import io.opentracing.Scope; -import org.apache.ratis.thirdparty.io.grpc.BindableService; -import org.apache.ratis.thirdparty.io.grpc.Server; -import org.apache.ratis.thirdparty.io.grpc.ServerBuilder; -import org.apache.ratis.thirdparty.io.grpc.ServerInterceptors; -import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts; -import org.apache.ratis.thirdparty.io.grpc.netty.NettyServerBuilder; -import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -/** - * Creates a Grpc server endpoint that acts as the communication layer for - * Ozone containers. - */ -public final class XceiverServerGrpc extends XceiverServer { - private static final Logger - LOG = LoggerFactory.getLogger(XceiverServerGrpc.class); - private static final String COMPONENT = "dn"; - private int port; - private UUID id; - private Server server; - private final ContainerDispatcher storageContainer; - private boolean isStarted; - private DatanodeDetails datanodeDetails; - - - /** - * Constructs a Grpc server class. - * - * @param conf - Configuration - */ - public XceiverServerGrpc(DatanodeDetails datanodeDetails, Configuration conf, - ContainerDispatcher dispatcher, CertificateClient caClient, - BindableService... 
additionalServices) { - super(conf, caClient); - Preconditions.checkNotNull(conf); - - this.id = datanodeDetails.getUuid(); - this.datanodeDetails = datanodeDetails; - this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - - if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { - this.port = 0; - } - - NettyServerBuilder nettyServerBuilder = - ((NettyServerBuilder) ServerBuilder.forPort(port)) - .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE); - - ServerCredentialInterceptor credInterceptor = - new ServerCredentialInterceptor(getBlockTokenVerifier()); - GrpcServerInterceptor tracingInterceptor = new GrpcServerInterceptor(); - nettyServerBuilder.addService(ServerInterceptors.intercept( - new GrpcXceiverService(dispatcher, - getSecurityConfig().isBlockTokenEnabled(), - getBlockTokenVerifier()), credInterceptor, - tracingInterceptor)); - - for (BindableService service : additionalServices) { - nettyServerBuilder.addService(service); - } - - if (getSecConfig().isGrpcTlsEnabled()) { - try { - SslContextBuilder sslClientContextBuilder = SslContextBuilder.forServer( - caClient.getPrivateKey(), caClient.getCertificate()); - SslContextBuilder sslContextBuilder = GrpcSslContexts.configure( - sslClientContextBuilder, getSecurityConfig().getGrpcSslProvider()); - nettyServerBuilder.sslContext(sslContextBuilder.build()); - } catch (Exception ex) { - LOG.error("Unable to setup TLS for secure datanode GRPC endpoint.", ex); - } - } - server = nettyServerBuilder.build(); - storageContainer = dispatcher; - } - - @Override - public int getIPCPort() { - return this.port; - } - - /** - * Returns the Replication type supported by this end-point. - * - * @return enum -- {Stand_Alone, Ratis, Grpc, Chained} - */ - @Override - public HddsProtos.ReplicationType getServerType() { - return HddsProtos.ReplicationType.STAND_ALONE; - } - - @Override - public void start() throws IOException { - if (!isStarted) { - server.start(); - int realPort = server.getPort(); - - if (port == 0) { - LOG.info("{} {} is started using port {}", getClass().getSimpleName(), - this.id, realPort); - port = realPort; - } - - //register the real port to the datanode details. - datanodeDetails.setPort(DatanodeDetails - .newPort(Name.STANDALONE, - realPort)); - - isStarted = true; - } - } - - @Override - public void stop() { - if (isStarted) { - server.shutdown(); - try { - server.awaitTermination(5, TimeUnit.SECONDS); - } catch (Exception e) { - LOG.error("failed to shutdown XceiverServerGrpc", e); - } - isStarted = false; - } - } - - @Override - public void submitRequest(ContainerCommandRequestProto request, - HddsProtos.PipelineID pipelineID) throws IOException { - try (Scope scope = TracingUtil - .importAndCreateScope( - "XceiverServerGrpc." 
+ request.getCmdType().name(), - request.getTraceID())) { - - super.submitRequest(request, pipelineID); - ContainerProtos.ContainerCommandResponseProto response = - storageContainer.dispatch(request, null); - if (response.getResult() != ContainerProtos.Result.SUCCESS) { - throw new StorageContainerException(response.getMessage(), - response.getResult()); - } - } - } - - @Override - public boolean isExist(HddsProtos.PipelineID pipelineId) { - return PipelineID.valueOf(id).getProtobuf().equals(pipelineId); - } - - @Override - public List getPipelineReport() { - return Collections.singletonList( - PipelineReport.newBuilder() - .setPipelineID(PipelineID.valueOf(id).getProtobuf()) - .build()); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java deleted file mode 100644 index 4e0d34384ce2a..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; - -import java.io.IOException; -import java.util.List; - -/** A server endpoint that acts as the communication layer for Ozone - * containers. */ -public interface XceiverServerSpi { - /** Starts the server. */ - void start() throws IOException; - - /** Stops a running server. */ - void stop(); - - /** Get server IPC port. */ - int getIPCPort(); - - /** - * Returns the Replication type supported by this end-point. - * @return enum -- {Stand_Alone, Ratis, Chained} - */ - HddsProtos.ReplicationType getServerType(); - - /** - * submits a containerRequest to be performed by the replication pipeline. - * @param request ContainerCommandRequest - */ - void submitRequest(ContainerCommandRequestProto request, - HddsProtos.PipelineID pipelineID) - throws IOException; - - /** - * Returns true if the given pipeline exist. - * - * @return true if pipeline present, else false - */ - boolean isExist(HddsProtos.PipelineID pipelineId); - - /** - * Get pipeline report for the XceiverServer instance. - * @return list of report for each pipeline. 
- */ - List getPipelineReport(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java deleted file mode 100644 index 59c96f134969f..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server; - -/** - * This package contains classes for the server of the storage container - * protocol. - */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java deleted file mode 100644 index 9893ae48347f9..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableRate; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.ratis.protocol.RaftGroupId; - -/** - * This class is for maintaining Container State Machine statistics. - */ -@InterfaceAudience.Private -@Metrics(about="Container State Machine Metrics", context="dfs") -public class CSMMetrics { - public static final String SOURCE_NAME = - CSMMetrics.class.getSimpleName(); - - // ratis op metrics metrics - private @Metric MutableCounterLong numWriteStateMachineOps; - private @Metric MutableCounterLong numQueryStateMachineOps; - private @Metric MutableCounterLong numApplyTransactionOps; - private @Metric MutableCounterLong numReadStateMachineOps; - private @Metric MutableCounterLong numBytesWrittenCount; - private @Metric MutableCounterLong numBytesCommittedCount; - - private @Metric MutableRate transactionLatency; - private MutableRate[] opsLatency; - private MetricsRegistry registry = null; - - // Failure Metrics - private @Metric MutableCounterLong numWriteStateMachineFails; - private @Metric MutableCounterLong numWriteDataFails; - private @Metric MutableCounterLong numQueryStateMachineFails; - private @Metric MutableCounterLong numApplyTransactionFails; - private @Metric MutableCounterLong numReadStateMachineFails; - private @Metric MutableCounterLong numReadStateMachineMissCount; - private @Metric MutableCounterLong numStartTransactionVerifyFailures; - private @Metric MutableCounterLong numContainerNotOpenVerifyFailures; - - private @Metric MutableRate applyTransaction; - private @Metric MutableRate writeStateMachineData; - - public CSMMetrics() { - int numCmdTypes = ContainerProtos.Type.values().length; - this.opsLatency = new MutableRate[numCmdTypes]; - this.registry = new MetricsRegistry(CSMMetrics.class.getSimpleName()); - for (int i = 0; i < numCmdTypes; i++) { - opsLatency[i] = registry.newRate( - ContainerProtos.Type.forNumber(i + 1).toString(), - ContainerProtos.Type.forNumber(i + 1) + " op"); - } - } - - public static CSMMetrics create(RaftGroupId gid) { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME + gid.toString(), - "Container State Machine", - new CSMMetrics()); - } - - public void incNumWriteStateMachineOps() { - numWriteStateMachineOps.incr(); - } - - public void incNumQueryStateMachineOps() { - numQueryStateMachineOps.incr(); - } - - public void incNumReadStateMachineOps() { - numReadStateMachineOps.incr(); - } - - public void incNumApplyTransactionsOps() { - numApplyTransactionOps.incr(); - } - - public void incNumWriteStateMachineFails() { - numWriteStateMachineFails.incr(); - } - - public void incNumWriteDataFails() { - numWriteDataFails.incr(); - } - - public void incNumQueryStateMachineFails() { - numQueryStateMachineFails.incr(); - } - - public void incNumBytesWrittenCount(long value) { - numBytesWrittenCount.incr(value); - } - - public void incNumBytesCommittedCount(long value) { - numBytesCommittedCount.incr(value); - } - - 
public void incNumReadStateMachineFails() { - numReadStateMachineFails.incr(); - } - - public void incNumReadStateMachineMissCount() { - numReadStateMachineMissCount.incr(); - } - - public void incNumApplyTransactionsFails() { - numApplyTransactionFails.incr(); - } - - @VisibleForTesting - public long getNumWriteStateMachineOps() { - return numWriteStateMachineOps.value(); - } - - @VisibleForTesting - public long getNumQueryStateMachineOps() { - return numQueryStateMachineOps.value(); - } - - @VisibleForTesting - public long getNumApplyTransactionsOps() { - return numApplyTransactionOps.value(); - } - - @VisibleForTesting - public long getNumWriteStateMachineFails() { - return numWriteStateMachineFails.value(); - } - - @VisibleForTesting - public long getNumWriteDataFails() { - return numWriteDataFails.value(); - } - - @VisibleForTesting - public long getNumQueryStateMachineFails() { - return numQueryStateMachineFails.value(); - } - - @VisibleForTesting - public long getNumApplyTransactionsFails() { - return numApplyTransactionFails.value(); - } - - @VisibleForTesting - public long getNumReadStateMachineFails() { - return numReadStateMachineFails.value(); - } - - @VisibleForTesting - public long getNumReadStateMachineMissCount() { - return numReadStateMachineMissCount.value(); - } - - @VisibleForTesting - public long getNumReadStateMachineOps() { - return numReadStateMachineOps.value(); - } - - @VisibleForTesting - public long getNumBytesWrittenCount() { - return numBytesWrittenCount.value(); - } - - @VisibleForTesting - public long getNumBytesCommittedCount() { - return numBytesCommittedCount.value(); - } - - public MutableRate getApplyTransactionLatency() { - return applyTransaction; - } - - public void incPipelineLatency(ContainerProtos.Type type, long latencyNanos) { - opsLatency[type.ordinal()].add(latencyNanos); - transactionLatency.add(latencyNanos); - } - - public void incNumStartTransactionVerifyFailures() { - numStartTransactionVerifyFailures.incr(); - } - - public void incNumContainerNotOpenVerifyFailures() { - numContainerNotOpenVerifyFailures.incr(); - } - - public void recordApplyTransactionCompletion(long latencyNanos) { - applyTransaction.add(latencyNanos); - } - - public void recordWriteStateMachineCompletion(long latencyNanos) { - writeStateMachineData.add(latencyNanos); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java deleted file mode 100644 index b89ec730f7c36..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ /dev/null @@ -1,871 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.util.Time; -import org.apache.ratis.proto.RaftProtos.RaftPeerRole; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.protocol.StateMachineException; -import org.apache.ratis.server.RaftServer; -import org.apache.ratis.server.impl.RaftServerProxy; -import org.apache.ratis.server.protocol.TermIndex; -import org.apache.ratis.server.raftlog.RaftLog; -import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo; -import org.apache.ratis.thirdparty.com.google.protobuf - .InvalidProtocolBufferException; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. 
- Container2BCSIDMapProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .WriteChunkRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadChunkRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadChunkResponseProto; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.hdds.security.token.TokenVerifier; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; -import org.apache.ratis.server.storage.RaftStorage; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.proto.RaftProtos.RoleInfoProto; -import org.apache.ratis.proto.RaftProtos.LogEntryProto; -import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; -import org.apache.ratis.statemachine.StateMachineStorage; -import org.apache.ratis.statemachine.TransactionContext; -import org.apache.ratis.statemachine.impl.BaseStateMachine; -import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; -import java.util.concurrent.Executors; -import java.io.FileOutputStream; -import java.io.FileInputStream; -import java.io.OutputStream; - -/** A {@link org.apache.ratis.statemachine.StateMachine} for containers. - * - * The stateMachine is responsible for handling different types of container - * requests. The container requests can be divided into readonly and write - * requests. - * - * Read only requests are classified in - * {@link org.apache.hadoop.hdds.HddsUtils#isReadOnly} - * and these readonly requests are replied from the {@link #query(Message)}. - * - * The write requests can be divided into requests with user data - * (WriteChunkRequest) and other request without user data. - * - * Inorder to optimize the write throughput, the writeChunk request is - * processed in 2 phases. The 2 phases are divided in - * {@link #startTransaction(RaftClientRequest)}, in the first phase the user - * data is written directly into the state machine via - * {@link #writeStateMachineData} and in the second phase the - * transaction is committed via {@link #applyTransaction(TransactionContext)} - * - * For the requests with no stateMachine data, the transaction is directly - * committed through - * {@link #applyTransaction(TransactionContext)} - * - * There are 2 ordering operation which are enforced right now in the code, - * 1) Write chunk operation are executed after the create container operation, - * the write chunk operation will fail otherwise as the container still hasn't - * been created. 
Hence the create container operation has been split in the - * {@link #startTransaction(RaftClientRequest)}, this will help in synchronizing - * the calls in {@link #writeStateMachineData} - * - * 2) Write chunk commit operation is executed after write chunk state machine - * operation. This will ensure that commit operation is sync'd with the state - * machine operation.For example, synchronization between writeChunk and - * createContainer in {@link ContainerStateMachine}. - **/ - -public class ContainerStateMachine extends BaseStateMachine { - static final Logger LOG = - LoggerFactory.getLogger(ContainerStateMachine.class); - private final SimpleStateMachineStorage storage = - new SimpleStateMachineStorage(); - private final RaftGroupId gid; - private final ContainerDispatcher dispatcher; - private final ContainerController containerController; - private ThreadPoolExecutor chunkExecutor; - private final XceiverServerRatis ratisServer; - private final ConcurrentHashMap> writeChunkFutureMap; - - // keeps track of the containers created per pipeline - private final Map container2BCSIDMap; - private ExecutorService[] executors; - private final Map applyTransactionCompletionMap; - private final Cache stateMachineDataCache; - private final boolean isBlockTokenEnabled; - private final TokenVerifier tokenVerifier; - private final AtomicBoolean stateMachineHealthy; - - private final Semaphore applyTransactionSemaphore; - /** - * CSM metrics. - */ - private final CSMMetrics metrics; - - @SuppressWarnings("parameternumber") - public ContainerStateMachine(RaftGroupId gid, ContainerDispatcher dispatcher, - ContainerController containerController, ThreadPoolExecutor chunkExecutor, - XceiverServerRatis ratisServer, long expiryInterval, - boolean isBlockTokenEnabled, TokenVerifier tokenVerifier, - Configuration conf) { - this.gid = gid; - this.dispatcher = dispatcher; - this.containerController = containerController; - this.chunkExecutor = chunkExecutor; - this.ratisServer = ratisServer; - metrics = CSMMetrics.create(gid); - this.writeChunkFutureMap = new ConcurrentHashMap<>(); - applyTransactionCompletionMap = new ConcurrentHashMap<>(); - stateMachineDataCache = CacheBuilder.newBuilder() - .expireAfterAccess(expiryInterval, TimeUnit.MILLISECONDS) - // set the limit on no of cached entries equal to no of max threads - // executing writeStateMachineData - .maximumSize(chunkExecutor.getCorePoolSize()).build(); - this.isBlockTokenEnabled = isBlockTokenEnabled; - this.tokenVerifier = tokenVerifier; - this.container2BCSIDMap = new ConcurrentHashMap<>(); - - final int numContainerOpExecutors = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); - int maxPendingApplyTransactions = conf.getInt( - ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, - ScmConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); - applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions); - stateMachineHealthy = new AtomicBoolean(true); - this.executors = new ExecutorService[numContainerOpExecutors]; - for (int i = 0; i < numContainerOpExecutors; i++) { - final int index = i; - this.executors[index] = Executors.newSingleThreadExecutor(r -> { - Thread t = new Thread(r); - t.setName("RatisApplyTransactionExecutor " + index); - return t; - }); - } - } - - @Override - public StateMachineStorage getStateMachineStorage() { - return storage; - } - - public CSMMetrics getMetrics() { - return metrics; - } - - @Override - public void initialize( - RaftServer server, RaftGroupId id, RaftStorage raftStorage) - throws IOException { - super.initialize(server, id, raftStorage); - storage.init(raftStorage); - ratisServer.notifyGroupAdd(gid); - - loadSnapshot(storage.getLatestSnapshot()); - } - - private long loadSnapshot(SingleFileSnapshotInfo snapshot) - throws IOException { - if (snapshot == null) { - TermIndex empty = - TermIndex.newTermIndex(0, RaftLog.INVALID_LOG_INDEX); - LOG.info("{}: The snapshot info is null. Setting the last applied index" + - "to:{}", gid, empty); - setLastAppliedTermIndex(empty); - return empty.getIndex(); - } - - final File snapshotFile = snapshot.getFile().getPath().toFile(); - final TermIndex last = - SimpleStateMachineStorage.getTermIndexFromSnapshotFile(snapshotFile); - LOG.info("{}: Setting the last applied index to {}", gid, last); - setLastAppliedTermIndex(last); - - // initialize the dispatcher with snapshot so that it build the missing - // container list - try (FileInputStream fin = new FileInputStream(snapshotFile)) { - byte[] container2BCSIDData = IOUtils.toByteArray(fin); - ContainerProtos.Container2BCSIDMapProto proto = - ContainerProtos.Container2BCSIDMapProto - .parseFrom(container2BCSIDData); - // read the created containers list from the snapshot file and add it to - // the container2BCSIDMap here. - // container2BCSIDMap will further grow as and when containers get created - container2BCSIDMap.putAll(proto.getContainer2BCSIDMap()); - dispatcher.buildMissingContainerSetAndValidate(container2BCSIDMap); - } - return last.getIndex(); - } - - /** - * As a part of taking snapshot with Ratis StateMachine, it will persist - * the existing container set in the snapshotFile. - * @param out OutputStream mapped to the Ratis snapshot file - * @throws IOException - */ - public void persistContainerSet(OutputStream out) throws IOException { - Container2BCSIDMapProto.Builder builder = - Container2BCSIDMapProto.newBuilder(); - builder.putAllContainer2BCSID(container2BCSIDMap); - // TODO : while snapshot is being taken, deleteContainer call should not - // should not happen. Lock protection will be required if delete - // container happens outside of Ratis. - IOUtils.write(builder.build().toByteArray(), out); - } - - public boolean isStateMachineHealthy() { - return stateMachineHealthy.get(); - } - - @Override - public long takeSnapshot() throws IOException { - TermIndex ti = getLastAppliedTermIndex(); - long startTime = Time.monotonicNow(); - if (!isStateMachineHealthy()) { - String msg = - "Failed to take snapshot " + " for " + gid + " as the stateMachine" - + " is unhealthy. 
The last applied index is at " + ti; - StateMachineException sme = new StateMachineException(msg); - LOG.error(msg); - throw sme; - } - if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) { - final File snapshotFile = - storage.getSnapshotFile(ti.getTerm(), ti.getIndex()); - LOG.info("{}: Taking a snapshot at:{} file {}", gid, ti, snapshotFile); - try (FileOutputStream fos = new FileOutputStream(snapshotFile)) { - persistContainerSet(fos); - fos.flush(); - // make sure the snapshot file is synced - fos.getFD().sync(); - } catch (IOException ioe) { - LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti, - snapshotFile); - throw ioe; - } - LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}", gid, ti, - snapshotFile, (Time.monotonicNow() - startTime)); - return ti.getIndex(); - } - return -1; - } - - @Override - public TransactionContext startTransaction(RaftClientRequest request) - throws IOException { - long startTime = Time.monotonicNowNanos(); - final ContainerCommandRequestProto proto = - message2ContainerCommandRequestProto(request.getMessage()); - Preconditions.checkArgument(request.getRaftGroupId().equals(gid)); - try { - dispatcher.validateContainerCommand(proto); - } catch (IOException ioe) { - if (ioe instanceof ContainerNotOpenException) { - metrics.incNumContainerNotOpenVerifyFailures(); - } else { - metrics.incNumStartTransactionVerifyFailures(); - LOG.error("startTransaction validation failed on leader", ioe); - } - TransactionContext ctxt = TransactionContext.newBuilder() - .setClientRequest(request) - .setStateMachine(this) - .setServerRole(RaftPeerRole.LEADER) - .build(); - ctxt.setException(ioe); - return ctxt; - } - if (proto.getCmdType() == Type.WriteChunk) { - final WriteChunkRequestProto write = proto.getWriteChunk(); - // create the log entry proto - final WriteChunkRequestProto commitWriteChunkProto = - WriteChunkRequestProto.newBuilder() - .setBlockID(write.getBlockID()) - .setChunkData(write.getChunkData()) - // skipping the data field as it is - // already set in statemachine data proto - .build(); - ContainerCommandRequestProto commitContainerCommandProto = - ContainerCommandRequestProto - .newBuilder(proto) - .setWriteChunk(commitWriteChunkProto) - .setTraceID(proto.getTraceID()) - .build(); - - return TransactionContext.newBuilder() - .setClientRequest(request) - .setStateMachine(this) - .setServerRole(RaftPeerRole.LEADER) - .setStateMachineContext(startTime) - .setStateMachineData(write.getData()) - .setLogData(commitContainerCommandProto.toByteString()) - .build(); - } else { - return TransactionContext.newBuilder() - .setClientRequest(request) - .setStateMachine(this) - .setServerRole(RaftPeerRole.LEADER) - .setStateMachineContext(startTime) - .setLogData(proto.toByteString()) - .build(); - } - - } - - private ByteString getStateMachineData(StateMachineLogEntryProto entryProto) { - return entryProto.getStateMachineEntry().getStateMachineData(); - } - - private ContainerCommandRequestProto getContainerCommandRequestProto( - ByteString request) throws InvalidProtocolBufferException { - // TODO: We can avoid creating new builder and set pipeline Id if - // the client is already sending the pipeline id, then we just have to - // validate the pipeline Id. 
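// The re-parsed request always gets its pipeline id overwritten with this
// state machine's Raft group id (gid), so entries replayed from the Ratis
// log are dispatched against the pipeline that owns this state machine
// regardless of what the client originally sent.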
- return ContainerCommandRequestProto.newBuilder( - ContainerCommandRequestProto.parseFrom(request)) - .setPipelineID(gid.getUuid().toString()).build(); - } - - private ContainerCommandRequestProto message2ContainerCommandRequestProto( - Message message) throws InvalidProtocolBufferException { - return ContainerCommandRequestMessage.toProto(message.getContent(), gid); - } - - private ContainerCommandResponseProto dispatchCommand( - ContainerCommandRequestProto requestProto, DispatcherContext context) { - if (LOG.isTraceEnabled()) { - LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid, - requestProto.getCmdType(), requestProto.getContainerID(), - requestProto.getPipelineID(), requestProto.getTraceID()); - } - if (isBlockTokenEnabled) { - try { - // ServerInterceptors intercepts incoming request and creates ugi. - tokenVerifier - .verify(UserGroupInformation.getCurrentUser().getShortUserName(), - requestProto.getEncodedToken()); - } catch (IOException ioe) { - StorageContainerException sce = new StorageContainerException( - "Block token verification failed. " + ioe.getMessage(), ioe, - ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED); - return ContainerUtils.logAndReturnError(LOG, sce, requestProto); - } - } - ContainerCommandResponseProto response = - dispatcher.dispatch(requestProto, context); - if (LOG.isTraceEnabled()) { - LOG.trace("{}: response {}", gid, response); - } - return response; - } - - private ContainerCommandResponseProto runCommand( - ContainerCommandRequestProto requestProto, - DispatcherContext context) { - return dispatchCommand(requestProto, context); - } - - private ExecutorService getCommandExecutor( - ContainerCommandRequestProto requestProto) { - int executorId = (int)(requestProto.getContainerID() % executors.length); - return executors[executorId]; - } - - private CompletableFuture handleWriteChunk( - ContainerCommandRequestProto requestProto, long entryIndex, long term, - long startTime) { - final WriteChunkRequestProto write = requestProto.getWriteChunk(); - RaftServer server = ratisServer.getServer(); - Preconditions.checkState(server instanceof RaftServerProxy); - try { - if (((RaftServerProxy) server).getImpl(gid).isLeader()) { - stateMachineDataCache.put(entryIndex, write.getData()); - } - } catch (IOException ioe) { - return completeExceptionally(ioe); - } - DispatcherContext context = - new DispatcherContext.Builder() - .setTerm(term) - .setLogIndex(entryIndex) - .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA) - .setContainer2BCSIDMap(container2BCSIDMap) - .build(); - CompletableFuture raftFuture = new CompletableFuture<>(); - // ensure the write chunk happens asynchronously in writeChunkExecutor pool - // thread. - CompletableFuture writeChunkFuture = - CompletableFuture.supplyAsync(() -> { - try { - return runCommand(requestProto, context); - } catch (Exception e) { - LOG.error(gid + ": writeChunk writeStateMachineData failed: blockId" - + write.getBlockID() + " logIndex " + entryIndex + " chunkName " - + write.getChunkData().getChunkName() + e); - raftFuture.completeExceptionally(e); - throw e; - } - }, chunkExecutor); - - writeChunkFutureMap.put(entryIndex, writeChunkFuture); - if (LOG.isDebugEnabled()) { - LOG.debug(gid + ": writeChunk writeStateMachineData : blockId " + - write.getBlockID() + " logIndex " + entryIndex + " chunkName " - + write.getChunkData().getChunkName()); - } - // Remove the future once it finishes execution from the - // writeChunkFutureMap. 
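// The completion callback below also maps dispatcher failures onto
// raftFuture, so a failed chunk write is propagated back to Ratis instead
// of being silently dropped.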
- writeChunkFuture.thenApply(r -> { - if (r.getResult() != ContainerProtos.Result.SUCCESS) { - StorageContainerException sce = - new StorageContainerException(r.getMessage(), r.getResult()); - LOG.error(gid + ": writeChunk writeStateMachineData failed: blockId" + - write.getBlockID() + " logIndex " + entryIndex + " chunkName " + - write.getChunkData().getChunkName() + " Error message: " + - r.getMessage() + " Container Result: " + r.getResult()); - metrics.incNumWriteDataFails(); - raftFuture.completeExceptionally(sce); - } else { - metrics.incNumBytesWrittenCount( - requestProto.getWriteChunk().getChunkData().getLen()); - if (LOG.isDebugEnabled()) { - LOG.debug(gid + - ": writeChunk writeStateMachineData completed: blockId" + - write.getBlockID() + " logIndex " + entryIndex + " chunkName " + - write.getChunkData().getChunkName()); - } - raftFuture.complete(r::toByteString); - metrics.recordWriteStateMachineCompletion( - Time.monotonicNowNanos() - startTime); - } - - writeChunkFutureMap.remove(entryIndex); - return r; - }); - return raftFuture; - } - - /* - * writeStateMachineData calls are not synchronized with each other - * and also with applyTransaction. - */ - @Override - public CompletableFuture writeStateMachineData(LogEntryProto entry) { - try { - metrics.incNumWriteStateMachineOps(); - long writeStateMachineStartTime = Time.monotonicNowNanos(); - ContainerCommandRequestProto requestProto = - getContainerCommandRequestProto( - entry.getStateMachineLogEntry().getLogData()); - WriteChunkRequestProto writeChunk = - WriteChunkRequestProto.newBuilder(requestProto.getWriteChunk()) - .setData(getStateMachineData(entry.getStateMachineLogEntry())) - .build(); - requestProto = ContainerCommandRequestProto.newBuilder(requestProto) - .setWriteChunk(writeChunk).build(); - Type cmdType = requestProto.getCmdType(); - - // For only writeChunk, there will be writeStateMachineData call. - // CreateContainer will happen as a part of writeChunk only. 
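// Any other command type reaching this point indicates a leader-side bug;
// the default branch below fails fast with an IllegalStateException.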
- switch (cmdType) { - case WriteChunk: - return handleWriteChunk(requestProto, entry.getIndex(), - entry.getTerm(), writeStateMachineStartTime); - default: - throw new IllegalStateException("Cmd Type:" + cmdType - + " should not have state machine data"); - } - } catch (IOException e) { - metrics.incNumWriteStateMachineFails(); - return completeExceptionally(e); - } - } - - @Override - public CompletableFuture query(Message request) { - try { - metrics.incNumQueryStateMachineOps(); - final ContainerCommandRequestProto requestProto = - message2ContainerCommandRequestProto(request); - return CompletableFuture - .completedFuture(runCommand(requestProto, null)::toByteString); - } catch (IOException e) { - metrics.incNumQueryStateMachineFails(); - return completeExceptionally(e); - } - } - - private ByteString readStateMachineData( - ContainerCommandRequestProto requestProto, long term, long index) - throws IOException { - // the stateMachine data is not present in the stateMachine cache, - // increment the stateMachine cache miss count - metrics.incNumReadStateMachineMissCount(); - WriteChunkRequestProto writeChunkRequestProto = - requestProto.getWriteChunk(); - ContainerProtos.ChunkInfo chunkInfo = writeChunkRequestProto.getChunkData(); - // prepare the chunk to be read - ReadChunkRequestProto.Builder readChunkRequestProto = - ReadChunkRequestProto.newBuilder() - .setBlockID(writeChunkRequestProto.getBlockID()) - .setChunkData(chunkInfo); - ContainerCommandRequestProto dataContainerCommandProto = - ContainerCommandRequestProto.newBuilder(requestProto) - .setCmdType(Type.ReadChunk).setReadChunk(readChunkRequestProto) - .build(); - DispatcherContext context = - new DispatcherContext.Builder().setTerm(term).setLogIndex(index) - .setReadFromTmpFile(true).build(); - // read the chunk - ContainerCommandResponseProto response = - dispatchCommand(dataContainerCommandProto, context); - if (response.getResult() != ContainerProtos.Result.SUCCESS) { - StorageContainerException sce = - new StorageContainerException(response.getMessage(), - response.getResult()); - LOG.error("gid {} : ReadStateMachine failed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, response.getCmdType(), index, - response.getMessage(), response.getResult()); - throw sce; - } - - ReadChunkResponseProto responseProto = response.getReadChunk(); - - ByteString data = responseProto.getData(); - // assert that the response has data in it. - Preconditions - .checkNotNull(data, "read chunk data is null for chunk:" + chunkInfo); - Preconditions.checkState(data.size() == chunkInfo.getLen(), String.format( - "read chunk len=%d does not match chunk expected len=%d for chunk:%s", - data.size(), chunkInfo.getLen(), chunkInfo)); - return data; - } - - /** - * Reads the Entry from the Cache or loads it back by reading from disk. - */ - private ByteString getCachedStateMachineData(Long logIndex, long term, - ContainerCommandRequestProto requestProto) throws ExecutionException { - return stateMachineDataCache.get(logIndex, - () -> readStateMachineData(requestProto, term, logIndex)); - } - - /** - * Returns the combined future of all the writeChunks till the given log - * index. The Raft log worker will wait for the stateMachineData to complete - * flush as well. - * - * @param index log index till which the stateMachine data needs to be flushed - * @return Combined future of all writeChunks till the log index given. 
- */ - @Override - public CompletableFuture flushStateMachineData(long index) { - List> futureList = - writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) - .map(Map.Entry::getValue).collect(Collectors.toList()); - return CompletableFuture.allOf( - futureList.toArray(new CompletableFuture[futureList.size()])); - } - /* - * This api is used by the leader while appending logs to the follower - * This allows the leader to read the state machine data from the - * state machine implementation in case cached state machine data has been - * evicted. - */ - @Override - public CompletableFuture readStateMachineData( - LogEntryProto entry) { - StateMachineLogEntryProto smLogEntryProto = entry.getStateMachineLogEntry(); - metrics.incNumReadStateMachineOps(); - if (!getStateMachineData(smLogEntryProto).isEmpty()) { - return CompletableFuture.completedFuture(ByteString.EMPTY); - } - try { - final ContainerCommandRequestProto requestProto = - getContainerCommandRequestProto( - entry.getStateMachineLogEntry().getLogData()); - // readStateMachineData should only be called for "write" to Ratis. - Preconditions.checkArgument(!HddsUtils.isReadOnly(requestProto)); - if (requestProto.getCmdType() == Type.WriteChunk) { - final CompletableFuture future = new CompletableFuture<>(); - CompletableFuture.supplyAsync(() -> { - try { - future.complete( - getCachedStateMachineData(entry.getIndex(), entry.getTerm(), - requestProto)); - } catch (ExecutionException e) { - metrics.incNumReadStateMachineFails(); - future.completeExceptionally(e); - } - return future; - }, chunkExecutor); - return future; - } else { - throw new IllegalStateException("Cmd type:" + requestProto.getCmdType() - + " cannot have state machine data"); - } - } catch (Exception e) { - metrics.incNumReadStateMachineFails(); - LOG.error("{} unable to read stateMachineData:", gid, e); - return completeExceptionally(e); - } - } - - private synchronized void updateLastApplied() { - Long appliedTerm = null; - long appliedIndex = -1; - for(long i = getLastAppliedTermIndex().getIndex() + 1;; i++) { - final Long removed = applyTransactionCompletionMap.remove(i); - if (removed == null) { - break; - } - appliedTerm = removed; - appliedIndex = i; - } - if (appliedTerm != null) { - updateLastAppliedTermIndex(appliedTerm, appliedIndex); - } - } - - /** - * Notifies the state machine about index updates because of entries - * which do not cause state machine update, i.e. conf entries, metadata - * entries - * @param term term of the log entry - * @param index index of the log entry - */ - @Override - public void notifyIndexUpdate(long term, long index) { - applyTransactionCompletionMap.put(index, term); - } - - /* - * ApplyTransaction calls in Ratis are sequential. 
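 * Back-pressure is applied through applyTransactionSemaphore, and the
 * dispatched command runs on a single-threaded executor selected by
 * containerID, which preserves per-container ordering of transactions.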
- */ - @Override - public CompletableFuture applyTransaction(TransactionContext trx) { - long index = trx.getLogEntry().getIndex(); - DispatcherContext.Builder builder = - new DispatcherContext.Builder() - .setTerm(trx.getLogEntry().getTerm()) - .setLogIndex(index); - - long applyTxnStartTime = Time.monotonicNowNanos(); - try { - applyTransactionSemaphore.acquire(); - metrics.incNumApplyTransactionsOps(); - ContainerCommandRequestProto requestProto = - getContainerCommandRequestProto( - trx.getStateMachineLogEntry().getLogData()); - Type cmdType = requestProto.getCmdType(); - // Make sure that in write chunk, the user data is not set - if (cmdType == Type.WriteChunk) { - Preconditions - .checkArgument(requestProto.getWriteChunk().getData().isEmpty()); - builder - .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA); - } - if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile - || cmdType == Type.PutBlock || cmdType == Type.CreateContainer) { - builder.setContainer2BCSIDMap(container2BCSIDMap); - } - CompletableFuture applyTransactionFuture = - new CompletableFuture<>(); - // Ensure the command gets executed in a separate thread than - // stateMachineUpdater thread which is calling applyTransaction here. - CompletableFuture future = - CompletableFuture.supplyAsync(() -> { - try { - return runCommand(requestProto, builder.build()); - } catch (Exception e) { - LOG.error("gid {} : ApplyTransaction failed. cmd {} logIndex " - + "{} exception {}", gid, requestProto.getCmdType(), - index, e); - applyTransactionFuture.completeExceptionally(e); - throw e; - } - }, getCommandExecutor(requestProto)); - future.thenApply(r -> { - if (trx.getServerRole() == RaftPeerRole.LEADER) { - long startTime = (long) trx.getStateMachineContext(); - metrics.incPipelineLatency(cmdType, - Time.monotonicNowNanos() - startTime); - } - // ignore close container exception while marking the stateMachine - // unhealthy - if (r.getResult() != ContainerProtos.Result.SUCCESS - && r.getResult() != ContainerProtos.Result.CONTAINER_NOT_OPEN - && r.getResult() != ContainerProtos.Result.CLOSED_CONTAINER_IO) { - StorageContainerException sce = - new StorageContainerException(r.getMessage(), r.getResult()); - LOG.error( - "gid {} : ApplyTransaction failed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, - r.getMessage(), r.getResult()); - metrics.incNumApplyTransactionsFails(); - // Since the applyTransaction now is completed exceptionally, - // before any further snapshot is taken , the exception will be - // caught in stateMachineUpdater in Ratis and ratis server will - // shutdown. - applyTransactionFuture.completeExceptionally(sce); - stateMachineHealthy.compareAndSet(true, false); - ratisServer.handleApplyTransactionFailure(gid, trx.getServerRole()); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug( - "gid {} : ApplyTransaction completed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, - r.getMessage(), r.getResult()); - } - applyTransactionFuture.complete(r::toByteString); - if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile) { - metrics.incNumBytesCommittedCount( - requestProto.getWriteChunk().getChunkData().getLen()); - } - // add the entry to the applyTransactionCompletionMap only if the - // stateMachine is healthy i.e, there has been no applyTransaction - // failures before. 
- if (isStateMachineHealthy()) { - final Long previous = applyTransactionCompletionMap - .put(index, trx.getLogEntry().getTerm()); - Preconditions.checkState(previous == null); - updateLastApplied(); - } - } - return applyTransactionFuture; - }).whenComplete((r, t) -> { - applyTransactionSemaphore.release(); - metrics.recordApplyTransactionCompletion( - Time.monotonicNowNanos() - applyTxnStartTime); - }); - return applyTransactionFuture; - } catch (IOException | InterruptedException e) { - metrics.incNumApplyTransactionsFails(); - return completeExceptionally(e); - } - } - - private static CompletableFuture completeExceptionally(Exception e) { - final CompletableFuture future = new CompletableFuture<>(); - future.completeExceptionally(e); - return future; - } - - @VisibleForTesting - public void evictStateMachineCache() { - stateMachineDataCache.invalidateAll(); - stateMachineDataCache.cleanUp(); - } - - @Override - public void notifySlowness(RoleInfoProto roleInfoProto) { - ratisServer.handleNodeSlowness(gid, roleInfoProto); - } - - @Override - public void notifyExtendedNoLeader(RoleInfoProto roleInfoProto) { - ratisServer.handleNoLeader(gid, roleInfoProto); - } - - @Override - public void notifyNotLeader(Collection pendingEntries) - throws IOException { - evictStateMachineCache(); - } - - @Override - public void notifyLogFailed(Throwable t, LogEntryProto failedEntry) { - ratisServer.handleNodeLogFailure(gid, t); - } - - @Override - public CompletableFuture notifyInstallSnapshotFromLeader( - RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - ratisServer.handleInstallSnapshotFromLeader(gid, roleInfoProto, - firstTermIndexInLog); - final CompletableFuture future = new CompletableFuture<>(); - future.complete(firstTermIndexInLog); - return future; - } - - @Override - public void notifyGroupRemove() { - ratisServer.notifyGroupRemove(gid); - // Make best effort to quasi-close all the containers on group removal. - // Containers already in terminal state like CLOSED or UNHEALTHY will not - // be affected. - for (Long cid : container2BCSIDMap.keySet()) { - try { - containerController.markContainerForClose(cid); - containerController.quasiCloseContainer(cid); - } catch (IOException e) { - } - } - } - - @Override - public void close() throws IOException { - evictStateMachineCache(); - for (ExecutorService executor : executors) { - executor.shutdown(); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java deleted file mode 100644 index 7d46910164eea..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -import java.util.Map; - -/** - * DispatcherContext class holds transport protocol specific context info - * required for execution of container commands over the container dispatcher. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class DispatcherContext { - /** - * Determines which stage of writeChunk a write chunk request is for. - */ - public enum WriteChunkStage { - WRITE_DATA, COMMIT_DATA, COMBINED - } - - // whether the chunk data needs to be written or committed or both - private final WriteChunkStage stage; - // indicates whether the read from tmp chunk files is allowed - private final boolean readFromTmpFile; - // which term the request is being served in Ratis - private final long term; - // the log index in Ratis log to which the request belongs to - private final long logIndex; - - private final Map container2BCSIDMap; - - private DispatcherContext(long term, long index, WriteChunkStage stage, - boolean readFromTmpFile, Map container2BCSIDMap) { - this.term = term; - this.logIndex = index; - this.stage = stage; - this.readFromTmpFile = readFromTmpFile; - this.container2BCSIDMap = container2BCSIDMap; - } - - public long getLogIndex() { - return logIndex; - } - - public boolean isReadFromTmpFile() { - return readFromTmpFile; - } - - public long getTerm() { - return term; - } - - public WriteChunkStage getStage() { - return stage; - } - - public Map getContainer2BCSIDMap() { - return container2BCSIDMap; - } - - /** - * Builder class for building DispatcherContext. - */ - public static final class Builder { - private WriteChunkStage stage = WriteChunkStage.COMBINED; - private boolean readFromTmpFile = false; - private long term; - private long logIndex; - private Map container2BCSIDMap; - - /** - * Sets the WriteChunkStage. - * - * @param writeChunkStage WriteChunk Stage - * @return DispatcherContext.Builder - */ - public Builder setStage(WriteChunkStage writeChunkStage) { - this.stage = writeChunkStage; - return this; - } - - /** - * Sets the flag for reading from tmp chunk files. - * - * @param setReadFromTmpFile whether to read from tmp chunk file or not - * @return DispatcherContext.Builder - */ - public Builder setReadFromTmpFile(boolean setReadFromTmpFile) { - this.readFromTmpFile = setReadFromTmpFile; - return this; - } - - /** - * Sets the current term for the container request from Ratis. - * - * @param currentTerm current term - * @return DispatcherContext.Builder - */ - public Builder setTerm(long currentTerm) { - this.term = currentTerm; - return this; - } - - /** - * Sets the logIndex for the container request from Ratis. - * - * @param index log index - * @return DispatcherContext.Builder - */ - public Builder setLogIndex(long index) { - this.logIndex = index; - return this; - } - - /** - * Sets the container2BCSIDMap to contain all the containerIds per - * RaftGroup. 
- * @param map container2BCSIDMap - * @return Builder - */ - public Builder setContainer2BCSIDMap(Map map) { - this.container2BCSIDMap = map; - return this; - } - /** - * Builds and returns DispatcherContext instance. - * - * @return DispatcherContext - */ - public DispatcherContext build() { - return new DispatcherContext(term, logIndex, stage, readFromTmpFile, - container2BCSIDMap); - } - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java deleted file mode 100644 index 7f112eacd81cf..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigTag; -import org.apache.hadoop.hdds.conf.ConfigType; - -/** - * Holds configuration items for Ratis/Raft server. - */ -@ConfigGroup(prefix = "hdds.ratis.server") -public class RatisServerConfiguration { - - private int numSnapshotsRetained; - - @Config(key = "num.snapshots.retained", - type = ConfigType.INT, - defaultValue = "5", - tags = {ConfigTag.STORAGE}, - description = "Config parameter to specify number of old snapshots " + - "retained at the Ratis leader.") - public void setNumSnapshotsRetained(int numSnapshotsRetained) { - this.numSnapshotsRetained = numSnapshotsRetained; - } - - public int getNumSnapshotsRetained() { - return numSnapshotsRetained; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java deleted file mode 100644 index 80e91cdf55de0..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ /dev/null @@ -1,689 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.OzoneConfigKeys; - -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer; - -import io.opentracing.Scope; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.ratis.RaftConfigKeys; -import org.apache.hadoop.hdds.ratis.RatisHelper; -import org.apache.ratis.conf.RaftProperties; -import org.apache.ratis.grpc.GrpcConfigKeys; -import org.apache.ratis.grpc.GrpcFactory; -import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.netty.NettyConfigKeys; -import org.apache.ratis.protocol.*; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.server.RaftServer; -import org.apache.ratis.server.RaftServerConfigKeys; -import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.proto.RaftProtos.RoleInfoProto; -import org.apache.ratis.proto.RaftProtos.ReplicationLevel; -import org.apache.ratis.server.protocol.TermIndex; -import org.apache.ratis.server.impl.RaftServerProxy; -import org.apache.ratis.util.SizeInBytes; -import org.apache.ratis.util.TimeDuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.HashSet; -import java.util.List; -import java.util.Objects; -import java.util.Collections; -import java.util.Set; -import java.util.UUID; -import java.util.ArrayList; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -/** - * Creates a ratis server endpoint that acts as the communication layer for - * Ozone containers. 
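 * It wraps a Ratis RaftServer and registers one ContainerStateMachine per
 * Raft group (pipeline) through the state machine registry.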
- */ -public final class XceiverServerRatis extends XceiverServer { - private static final Logger LOG = LoggerFactory - .getLogger(XceiverServerRatis.class); - private static final AtomicLong CALL_ID_COUNTER = new AtomicLong(); - - private static long nextCallId() { - return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE; - } - - private int port; - private final RaftServer server; - private ThreadPoolExecutor chunkExecutor; - private final ContainerDispatcher dispatcher; - private final ContainerController containerController; - private ClientId clientId = ClientId.randomId(); - private final StateContext context; - private final ReplicationLevel replicationLevel; - private long nodeFailureTimeoutMs; - private final long cacheEntryExpiryInteval; - private boolean isStarted = false; - private DatanodeDetails datanodeDetails; - private final OzoneConfiguration conf; - // TODO: Remove the gids set when Ratis supports an api to query active - // pipelines - private final Set raftGids = new HashSet<>(); - - @SuppressWarnings("parameternumber") - private XceiverServerRatis(DatanodeDetails dd, int port, - ContainerDispatcher dispatcher, ContainerController containerController, - StateContext context, GrpcTlsConfig tlsConfig, CertificateClient caClient, - OzoneConfiguration conf) - throws IOException { - super(conf, caClient); - this.conf = conf; - Objects.requireNonNull(dd, "id == null"); - datanodeDetails = dd; - this.port = port; - RaftProperties serverProperties = newRaftProperties(); - final int numWriteChunkThreads = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT); - chunkExecutor = - new ThreadPoolExecutor(numWriteChunkThreads, numWriteChunkThreads, - 100, TimeUnit.SECONDS, - new ArrayBlockingQueue<>(1024), - new ThreadPoolExecutor.CallerRunsPolicy()); - this.context = context; - this.replicationLevel = - conf.getEnum(OzoneConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT); - cacheEntryExpiryInteval = conf.getTimeDuration(OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL, - OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - this.dispatcher = dispatcher; - this.containerController = containerController; - - RaftServer.Builder builder = - RaftServer.newBuilder().setServerId(RatisHelper.toRaftPeerId(dd)) - .setProperties(serverProperties) - .setStateMachineRegistry(this::getStateMachine); - if (tlsConfig != null) { - builder.setParameters(GrpcFactory.newRaftParameters(tlsConfig)); - } - this.server = builder.build(); - } - - private ContainerStateMachine getStateMachine(RaftGroupId gid) { - return new ContainerStateMachine(gid, dispatcher, containerController, - chunkExecutor, this, cacheEntryExpiryInteval, - getSecurityConfig().isBlockTokenEnabled(), getBlockTokenVerifier(), - conf); - } - - private RaftProperties newRaftProperties() { - final RaftProperties properties = new RaftProperties(); - - // Set rpc type - final RpcType rpc = setRpcType(properties); - - // set raft segment size - setRaftSegmentSize(properties); - - // set raft segment pre-allocated size - final int raftSegmentPreallocatedSize = - setRaftSegmentPreallocatedSize(properties); - - // Set max write buffer size, which is the scm chunk size - final int maxChunkSize = setMaxWriteBuffer(properties); - TimeUnit timeUnit; - long duration; - - // set the configs enable and set the stateMachineData sync timeout - RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true); - timeUnit = OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); - duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, - OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT - .getDuration(), timeUnit); - final TimeDuration dataSyncTimeout = - TimeDuration.valueOf(duration, timeUnit); - RaftServerConfigKeys.Log.StateMachineData - .setSyncTimeout(properties, dataSyncTimeout); - - // Set the server Request timeout - setServerRequestTimeout(properties); - - // set timeout for a retry cache entry - setTimeoutForRetryCache(properties); - - // Set the ratis leader election timeout - setRatisLeaderElectionTimeout(properties); - - // Set the maximum cache segments - RaftServerConfigKeys.Log.setMaxCachedSegmentNum(properties, 2); - - // set the node failure timeout - setNodeFailureTimeout(properties); - - // Set the ratis storage directory - String storageDir = HddsServerUtil.getOzoneDatanodeRatisDirectory(conf); - RaftServerConfigKeys.setStorageDirs(properties, - Collections.singletonList(new File(storageDir))); - - // For grpc set the maximum message size - GrpcConfigKeys.setMessageSizeMax(properties, - SizeInBytes.valueOf(maxChunkSize + raftSegmentPreallocatedSize)); - - // Set the ratis port number - if (rpc == SupportedRpcType.GRPC) { - GrpcConfigKeys.Server.setPort(properties, port); - } else if (rpc == SupportedRpcType.NETTY) { - NettyConfigKeys.Server.setPort(properties, port); - } - - long snapshotThreshold = - conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, - OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); - RaftServerConfigKeys.Snapshot. - setAutoTriggerEnabled(properties, true); - RaftServerConfigKeys.Snapshot. 
- setAutoTriggerThreshold(properties, snapshotThreshold); - int maxPendingRequets = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT - ); - RaftServerConfigKeys.Write.setElementLimit(properties, maxPendingRequets); - int logQueueNumElements = - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); - final int logQueueByteLimit = (int) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, - StorageUnit.BYTES); - RaftServerConfigKeys.Log.setQueueElementLimit( - properties, logQueueNumElements); - RaftServerConfigKeys.Log.setQueueByteLimit(properties, logQueueByteLimit); - - int numSyncRetries = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, - OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT); - RaftServerConfigKeys.Log.StateMachineData.setSyncTimeoutRetry(properties, - numSyncRetries); - - // Enable the StateMachineCaching - RaftServerConfigKeys.Log.StateMachineData.setCachingEnabled( - properties, true); - - RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties, - false); - - int purgeGap = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); - RaftServerConfigKeys.Log.setPurgeGap(properties, purgeGap); - - //Set the number of Snapshots Retained. - RatisServerConfiguration ratisServerConfiguration = - conf.getObject(RatisServerConfiguration.class); - int numSnapshotsRetained = - ratisServerConfiguration.getNumSnapshotsRetained(); - RaftServerConfigKeys.Snapshot.setRetentionFileNum(properties, - numSnapshotsRetained); - return properties; - } - - private void setNodeFailureTimeout(RaftProperties properties) { - TimeUnit timeUnit; - long duration; - timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT - .getUnit(); - duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT - .getDuration(), timeUnit); - final TimeDuration nodeFailureTimeout = - TimeDuration.valueOf(duration, timeUnit); - RaftServerConfigKeys.Notification.setNoLeaderTimeout(properties, - nodeFailureTimeout); - RaftServerConfigKeys.Rpc.setSlownessTimeout(properties, - nodeFailureTimeout); - nodeFailureTimeoutMs = nodeFailureTimeout.toLong(TimeUnit.MILLISECONDS); - } - - private void setRatisLeaderElectionTimeout(RaftProperties properties) { - long duration; - TimeUnit leaderElectionMinTimeoutUnit = - OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT - .getUnit(); - duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - OzoneConfigKeys. 
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT - .getDuration(), leaderElectionMinTimeoutUnit); - final TimeDuration leaderElectionMinTimeout = - TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit); - RaftServerConfigKeys.Rpc - .setTimeoutMin(properties, leaderElectionMinTimeout); - long leaderElectionMaxTimeout = - leaderElectionMinTimeout.toLong(TimeUnit.MILLISECONDS) + 200; - RaftServerConfigKeys.Rpc.setTimeoutMax(properties, - TimeDuration.valueOf(leaderElectionMaxTimeout, TimeUnit.MILLISECONDS)); - } - - private void setTimeoutForRetryCache(RaftProperties properties) { - TimeUnit timeUnit; - long duration; - timeUnit = - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT - .getUnit(); - duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT - .getDuration(), timeUnit); - final TimeDuration retryCacheTimeout = - TimeDuration.valueOf(duration, timeUnit); - RaftServerConfigKeys.RetryCache - .setExpiryTime(properties, retryCacheTimeout); - } - - private void setServerRequestTimeout(RaftProperties properties) { - TimeUnit timeUnit; - long duration; - timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT - .getUnit(); - duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT - .getDuration(), timeUnit); - final TimeDuration serverRequestTimeout = - TimeDuration.valueOf(duration, timeUnit); - RaftServerConfigKeys.Rpc - .setRequestTimeout(properties, serverRequestTimeout); - } - - private int setMaxWriteBuffer(RaftProperties properties) { - final int maxChunkSize = OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE; - RaftServerConfigKeys.Log.setWriteBufferSize(properties, - SizeInBytes.valueOf(maxChunkSize)); - return maxChunkSize; - } - - private int setRaftSegmentPreallocatedSize(RaftProperties properties) { - final int raftSegmentPreallocatedSize = (int) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, - StorageUnit.BYTES); - int logAppenderQueueNumElements = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, - OzoneConfigKeys - .DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); - final int logAppenderQueueByteLimit = (int) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, - OzoneConfigKeys - .DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, - StorageUnit.BYTES); - RaftServerConfigKeys.Log.Appender - .setBufferElementLimit(properties, logAppenderQueueNumElements); - RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties, - SizeInBytes.valueOf(logAppenderQueueByteLimit)); - RaftServerConfigKeys.Log.setPreallocatedSize(properties, - SizeInBytes.valueOf(raftSegmentPreallocatedSize)); - return raftSegmentPreallocatedSize; - } - - private void setRaftSegmentSize(RaftProperties properties) { - final int raftSegmentSize = (int)conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, - StorageUnit.BYTES); - RaftServerConfigKeys.Log.setSegmentSizeMax(properties, - SizeInBytes.valueOf(raftSegmentSize)); - } - - private RpcType setRpcType(RaftProperties properties) { - final String rpcType = conf.get( - 
OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); - final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); - RaftConfigKeys.Rpc.setType(properties, rpc); - return rpc; - } - - public static XceiverServerRatis newXceiverServerRatis( - DatanodeDetails datanodeDetails, OzoneConfiguration ozoneConf, - ContainerDispatcher dispatcher, ContainerController containerController, - CertificateClient caClient, StateContext context) throws IOException { - int localPort = ozoneConf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT); - - // Get an available port on current node and - // use that as the container port - if (ozoneConf.getBoolean(OzoneConfigKeys - .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT)) { - localPort = 0; - } - GrpcTlsConfig tlsConfig = RatisHelper.createTlsServerConfigForDN( - new SecurityConfig(ozoneConf), caClient); - - return new XceiverServerRatis(datanodeDetails, localPort, dispatcher, - containerController, context, tlsConfig, caClient, ozoneConf); - } - - @Override - public void start() throws IOException { - if (!isStarted) { - LOG.info("Starting {} {} at port {}", getClass().getSimpleName(), - server.getId(), getIPCPort()); - chunkExecutor.prestartAllCoreThreads(); - server.start(); - - int realPort = - ((RaftServerProxy) server).getServerRpc().getInetSocketAddress() - .getPort(); - - if (port == 0) { - LOG.info("{} {} is started using port {}", getClass().getSimpleName(), - server.getId(), realPort); - port = realPort; - } - - //register the real port to the datanode details. - datanodeDetails.setPort(DatanodeDetails - .newPort(DatanodeDetails.Port.Name.RATIS, - realPort)); - - isStarted = true; - } - } - - @Override - public void stop() { - if (isStarted) { - try { - // shutdown server before the executors as while shutting down, - // some of the tasks would be executed using the executors. - server.close(); - chunkExecutor.shutdown(); - isStarted = false; - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } - - @Override - public int getIPCPort() { - return port; - } - - /** - * Returns the Replication type supported by this end-point. - * - * @return enum -- {Stand_Alone, Ratis, Chained} - */ - @Override - public HddsProtos.ReplicationType getServerType() { - return HddsProtos.ReplicationType.RATIS; - } - - @VisibleForTesting - public RaftServer getServer() { - return server; - } - - private void processReply(RaftClientReply reply) throws IOException { - // NotLeader exception is thrown only when the raft server to which the - // request is submitted is not the leader. The request will be rejected - // and will eventually be executed once the request comes via the leader - // node. - NotLeaderException notLeaderException = reply.getNotLeaderException(); - if (notLeaderException != null) { - throw notLeaderException; - } - StateMachineException stateMachineException = - reply.getStateMachineException(); - if (stateMachineException != null) { - throw stateMachineException; - } - } - - @Override - public void submitRequest(ContainerCommandRequestProto request, - HddsProtos.PipelineID pipelineID) throws IOException { - super.submitRequest(request, pipelineID); - RaftClientReply reply; - try (Scope scope = TracingUtil - .importAndCreateScope( - "XceiverServerRatis." 
+ request.getCmdType().name(), - request.getTraceID())) { - - RaftClientRequest raftClientRequest = - createRaftClientRequest(request, pipelineID, - RaftClientRequest.writeRequestType()); - try { - reply = server.submitClientRequestAsync(raftClientRequest).get(); - } catch (Exception e) { - throw new IOException(e.getMessage(), e); - } - processReply(reply); - } - } - - private RaftClientRequest createRaftClientRequest( - ContainerCommandRequestProto request, HddsProtos.PipelineID pipelineID, - RaftClientRequest.Type type) { - return new RaftClientRequest(clientId, server.getId(), - RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()), - nextCallId(), ContainerCommandRequestMessage.toMessage(request, null), - type, null); - } - - private GroupInfoRequest createGroupInfoRequest( - HddsProtos.PipelineID pipelineID) { - return new GroupInfoRequest(clientId, server.getId(), - RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()), - nextCallId()); - } - - private void handlePipelineFailure(RaftGroupId groupId, - RoleInfoProto roleInfoProto) { - String msg; - UUID datanode = RatisHelper.toDatanodeId(roleInfoProto.getSelf()); - RaftPeerId id = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()); - switch (roleInfoProto.getRole()) { - case CANDIDATE: - msg = datanode + " is in candidate state for " + - roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs() + "ms"; - break; - case LEADER: - StringBuilder sb = new StringBuilder(); - sb.append(datanode).append(" has not seen follower/s"); - for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo() - .getFollowerInfoList()) { - if (follower.getLastRpcElapsedTimeMs() > nodeFailureTimeoutMs) { - sb.append(" ").append(RatisHelper.toDatanodeId(follower.getId())) - .append(" for ").append(follower.getLastRpcElapsedTimeMs()) - .append("ms"); - } - } - msg = sb.toString(); - break; - default: - LOG.error("unknown state:" + roleInfoProto.getRole()); - throw new IllegalStateException("node" + id + " is in illegal role " - + roleInfoProto.getRole()); - } - - triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.PIPELINE_FAILED, false); - } - - private void triggerPipelineClose(RaftGroupId groupId, String detail, - ClosePipelineInfo.Reason reasonCode, boolean triggerHB) { - PipelineID pipelineID = PipelineID.valueOf(groupId.getUuid()); - ClosePipelineInfo.Builder closePipelineInfo = - ClosePipelineInfo.newBuilder() - .setPipelineID(pipelineID.getProtobuf()) - .setReason(reasonCode) - .setDetailedReason(detail); - - PipelineAction action = PipelineAction.newBuilder() - .setClosePipeline(closePipelineInfo) - .setAction(PipelineAction.Action.CLOSE) - .build(); - context.addPipelineActionIfAbsent(action); - // wait for the next HB timeout or right away? 
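// triggerHB is set for apply-transaction and log failures so the close
// action is reported on an immediate heartbeat rather than waiting for the
// next heartbeat interval.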
- if (triggerHB) { - context.getParent().triggerHeartbeat(); - } - LOG.error( - "pipeline Action " + action.getAction() + " on pipeline " + pipelineID - + ".Reason : " + action.getClosePipeline().getDetailedReason()); - } - - @Override - public boolean isExist(HddsProtos.PipelineID pipelineId) { - return raftGids.contains( - RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineId).getId())); - } - - @Override - public List getPipelineReport() { - try { - Iterable gids = server.getGroupIds(); - List reports = new ArrayList<>(); - for (RaftGroupId groupId : gids) { - reports.add(PipelineReport.newBuilder() - .setPipelineID(PipelineID.valueOf(groupId.getUuid()).getProtobuf()) - .build()); - } - return reports; - } catch (Exception e) { - return null; - } - } - - @VisibleForTesting - public List getPipelineIds() { - Iterable gids = server.getGroupIds(); - List pipelineIDs = new ArrayList<>(); - for (RaftGroupId groupId : gids) { - pipelineIDs.add(PipelineID.valueOf(groupId.getUuid())); - LOG.info("pipeline id {}", PipelineID.valueOf(groupId.getUuid())); - } - return pipelineIDs; - } - - void handleNodeSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto) { - handlePipelineFailure(groupId, roleInfoProto); - } - - void handleNoLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto) { - handlePipelineFailure(groupId, roleInfoProto); - } - - void handleApplyTransactionFailure(RaftGroupId groupId, - RaftProtos.RaftPeerRole role) { - UUID dnId = RatisHelper.toDatanodeId(getServer().getId()); - String msg = - "Ratis Transaction failure in datanode " + dnId + " with role " + role - + " .Triggering pipeline close action."; - triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.STATEMACHINE_TRANSACTION_FAILED, true); - } - /** - * The fact that the snapshot contents cannot be used to actually catch up - * the follower, it is the reason to initiate close pipeline and - * not install the snapshot. The follower will basically never be able to - * catch up. - * - * @param groupId raft group information - * @param roleInfoProto information about the current node role and - * rpc delay information. - * @param firstTermIndexInLog After the snapshot installation is complete, - * return the last included term index in the snapshot. - */ - void handleInstallSnapshotFromLeader(RaftGroupId groupId, - RoleInfoProto roleInfoProto, - TermIndex firstTermIndexInLog) { - LOG.warn("Install snapshot notification received from Leader with " + - "termIndex: {}, terminating pipeline: {}", - firstTermIndexInLog, groupId); - handlePipelineFailure(groupId, roleInfoProto); - } - - /** - * Notify the Datanode Ratis endpoint of Ratis log failure. - * Expected to be invoked from the Container StateMachine - * @param groupId the Ratis group/pipeline for which log has failed - * @param t exception encountered at the time of the failure - * - */ - @VisibleForTesting - public void handleNodeLogFailure(RaftGroupId groupId, Throwable t) { - String msg = (t == null) ? "Unspecified failure reported in Ratis log" - : t.getMessage(); - - triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.PIPELINE_LOG_FAILED, true); - } - - public long getMinReplicatedIndex(PipelineID pipelineID) throws IOException { - Long minIndex; - GroupInfoReply reply = getServer() - .getGroupInfo(createGroupInfoRequest(pipelineID.getProtobuf())); - minIndex = RatisHelper.getMinReplicatedIndex(reply.getCommitInfos()); - return minIndex == null ? 
-1 : minIndex.longValue(); - } - - void notifyGroupRemove(RaftGroupId gid) { - raftGids.remove(gid); - } - - void notifyGroupAdd(RaftGroupId gid) { - raftGids.add(gid); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java deleted file mode 100644 index 8debfe02837b4..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -/** - * This package contains classes for the server implementation - * using Apache Ratis - */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java deleted file mode 100644 index 4ddb4e48792fb..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.utils; - -import com.google.common.base.Preconditions; -import org.apache.commons.collections.MapIterator; -import org.apache.commons.collections.map.LRUMap; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -/** - * container cache is a LRUMap that maintains the DB handles. - */ -public final class ContainerCache extends LRUMap { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerCache.class); - private final Lock lock = new ReentrantLock(); - private static ContainerCache cache; - private static final float LOAD_FACTOR = 0.75f; - /** - * Constructs a cache that holds DBHandle references. - */ - private ContainerCache(int maxSize, float loadFactor, boolean - scanUntilRemovable) { - super(maxSize, loadFactor, scanUntilRemovable); - } - - /** - * Return a singleton instance of {@link ContainerCache} - * that holds the DB handlers. - * - * @param conf - Configuration. - * @return A instance of {@link ContainerCache}. - */ - public synchronized static ContainerCache getInstance(Configuration conf) { - if (cache == null) { - int cacheSize = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, - OzoneConfigKeys.OZONE_CONTAINER_CACHE_DEFAULT); - cache = new ContainerCache(cacheSize, LOAD_FACTOR, true); - } - return cache; - } - - /** - * Closes all the db instances and resets the cache. - */ - public void shutdownCache() { - lock.lock(); - try { - // iterate the cache and close each db - MapIterator iterator = cache.mapIterator(); - while (iterator.hasNext()) { - iterator.next(); - ReferenceCountedDB db = (ReferenceCountedDB) iterator.getValue(); - Preconditions.checkArgument(db.cleanup(), "refCount:", - db.getReferenceCount()); - } - // reset the cache - cache.clear(); - } finally { - lock.unlock(); - } - } - - /** - * {@inheritDoc} - */ - @Override - protected boolean removeLRU(LinkEntry entry) { - ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue(); - lock.lock(); - try { - return db.cleanup(); - } finally { - lock.unlock(); - } - } - - /** - * Returns a DB handle if available, create the handler otherwise. - * - * @param containerID - ID of the container. - * @param containerDBType - DB type of the container. - * @param containerDBPath - DB path of the container. - * @param conf - Hadoop Configuration. - * @return ReferenceCountedDB. - */ - public ReferenceCountedDB getDB(long containerID, String containerDBType, - String containerDBPath, Configuration conf) - throws IOException { - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative."); - lock.lock(); - try { - ReferenceCountedDB db = (ReferenceCountedDB) this.get(containerDBPath); - - if (db == null) { - MetadataStore metadataStore = - MetadataStoreBuilder.newBuilder() - .setDbFile(new File(containerDBPath)) - .setCreateIfMissing(false) - .setConf(conf) - .setDBType(containerDBType) - .build(); - db = new ReferenceCountedDB(metadataStore, containerDBPath); - this.put(containerDBPath, db); - } - // increment the reference before returning the object - db.incrementReference(); - return db; - } catch (Exception e) { - LOG.error("Error opening DB. 
Container:{} ContainerPath:{}", - containerID, containerDBPath, e); - throw e; - } finally { - lock.unlock(); - } - } - - /** - * Remove a DB handler from cache. - * - * @param containerDBPath - path of the container db file. - */ - public void removeDB(String containerDBPath) { - lock.lock(); - try { - ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath); - if (db != null) { - Preconditions.checkArgument(db.cleanup(), "refCount:", - db.getReferenceCount()); - } - this.remove(containerDBPath); - } finally { - lock.unlock(); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java deleted file mode 100644 index cb356dadeb236..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java +++ /dev/null @@ -1,219 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.utils; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.InconsistentStorageStateException; -import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; - -import java.io.File; -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -/** - * A util class for {@link HddsVolume}. - */ -public final class HddsVolumeUtil { - - // Private constructor for Utility class. Unused. - private HddsVolumeUtil() { - } - - private static final String VERSION_FILE = "VERSION"; - private static final String STORAGE_ID_PREFIX = "DS-"; - - public static File getVersionFile(File rootDir) { - return new File(rootDir, VERSION_FILE); - } - - public static String generateUuid() { - return STORAGE_ID_PREFIX + UUID.randomUUID(); - } - - /** - * Get hddsRoot from volume root. If volumeRoot points to hddsRoot, it is - * returned as is. - * For a volumeRoot /data/disk1, the hddsRoot is /data/disk1/hdds. - * @param volumeRoot root of the volume. - * @return hddsRoot of the volume. - */ - public static String getHddsRoot(String volumeRoot) { - if (volumeRoot.endsWith(HddsVolume.HDDS_VOLUME_DIR)) { - return volumeRoot; - } else { - File hddsRoot = new File(volumeRoot, HddsVolume.HDDS_VOLUME_DIR); - return hddsRoot.getPath(); - } - } - - /** - * Returns storageID if it is valid. Throws an exception otherwise. 
- */ - @VisibleForTesting - public static String getStorageID(Properties props, File versionFile) - throws InconsistentStorageStateException { - return getProperty(props, OzoneConsts.STORAGE_ID, versionFile); - } - - /** - * Returns clusterID if it is valid. It should match the clusterID from the - * Datanode. Throws an exception otherwise. - */ - @VisibleForTesting - public static String getClusterID(Properties props, File versionFile, - String clusterID) throws InconsistentStorageStateException { - String cid = getProperty(props, OzoneConsts.CLUSTER_ID, versionFile); - - if (clusterID == null) { - return cid; - } - if (!clusterID.equals(cid)) { - throw new InconsistentStorageStateException("Mismatched " + - "ClusterIDs. Version File : " + versionFile + " has clusterID: " + - cid + " and Datanode has clusterID: " + clusterID); - } - return cid; - } - - /** - * Returns datanodeUuid if it is valid. It should match the UUID of the - * Datanode. Throws an exception otherwise. - */ - @VisibleForTesting - public static String getDatanodeUUID(Properties props, File versionFile, - String datanodeUuid) - throws InconsistentStorageStateException { - String datanodeID = getProperty(props, OzoneConsts.DATANODE_UUID, - versionFile); - - if (datanodeUuid != null && !datanodeUuid.equals(datanodeID)) { - throw new InconsistentStorageStateException("Mismatched " + - "DatanodeUUIDs. Version File : " + versionFile + " has datanodeUuid: " - + datanodeID + " and Datanode has datanodeUuid: " + datanodeUuid); - } - return datanodeID; - } - - /** - * Returns creationTime if it is valid. Throws an exception otherwise. - */ - @VisibleForTesting - public static long getCreationTime(Properties props, File versionFile) - throws InconsistentStorageStateException { - String cTimeStr = getProperty(props, OzoneConsts.CTIME, versionFile); - - long cTime = Long.parseLong(cTimeStr); - long currentTime = Time.now(); - if (cTime > currentTime || cTime < 0) { - throw new InconsistentStorageStateException("Invalid Creation time in " + - "Version File : " + versionFile + " - " + cTime + ". Current system" + - " time is " + currentTime); - } - return cTime; - } - - /** - * Returns layOutVersion if it is valid. Throws an exception otherwise. - */ - @VisibleForTesting - public static int getLayOutVersion(Properties props, File versionFile) throws - InconsistentStorageStateException { - String lvStr = getProperty(props, OzoneConsts.LAYOUTVERSION, versionFile); - - int lv = Integer.parseInt(lvStr); - if(DataNodeLayoutVersion.getLatestVersion().getVersion() != lv) { - throw new InconsistentStorageStateException("Invalid layOutVersion. " + - "Version file has layOutVersion as " + lv + " and latest Datanode " + - "layOutVersion is " + - DataNodeLayoutVersion.getLatestVersion().getVersion()); - } - return lv; - } - - private static String getProperty(Properties props, String propName, File - versionFile) - throws InconsistentStorageStateException { - String value = props.getProperty(propName); - if (StringUtils.isBlank(value)) { - throw new InconsistentStorageStateException("Invalid " + propName + - ". Version File : " + versionFile + " has null or empty " + propName); - } - return value; - } - - /** - * Check Volume is in consistent state or not. - * @param hddsVolume - * @param scmId - * @param clusterId - * @param logger - * @return true - if volume is in consistent state, otherwise false. 
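A minimal call-site sketch for checkVolume(), matching the contract documented above; the surrounding volume-initialization flow is assumed rather than taken from this change:

    import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class VolumeCheckSketch {
      private static final Logger LOG = LoggerFactory.getLogger(VolumeCheckSketch.class);

      // True when the volume has a usable hdds layout (VERSION file plus the
      // per-SCM directory); false means the caller should treat it as failed.
      static boolean isUsable(HddsVolume volume, String scmId, String clusterId) {
        return HddsVolumeUtil.checkVolume(volume, scmId, clusterId, LOG);
      }
    }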
- */ - public static boolean checkVolume(HddsVolume hddsVolume, String scmId, String - clusterId, Logger logger) { - File hddsRoot = hddsVolume.getHddsRootDir(); - String volumeRoot = hddsRoot.getPath(); - File scmDir = new File(hddsRoot, scmId); - - try { - hddsVolume.format(clusterId); - } catch (IOException ex) { - logger.error("Error during formatting volume {}, exception is {}", - volumeRoot, ex); - return false; - } - - File[] hddsFiles = hddsRoot.listFiles(); - - if(hddsFiles == null) { - // This is the case for IOException, where listFiles returns null. - // So, we fail the volume. - return false; - } else if (hddsFiles.length == 1) { - // DN started for first time or this is a newly added volume. - // So we create scm directory. - if (!scmDir.mkdir()) { - logger.error("Unable to create scmDir {}", scmDir); - return false; - } - return true; - } else if(hddsFiles.length == 2) { - // The files should be Version and SCM directory - if (scmDir.exists()) { - return true; - } else { - logger.error("Volume {} is in Inconsistent state, expected scm " + - "directory {} does not exist", volumeRoot, scmDir - .getAbsolutePath()); - return false; - } - } else { - // The hdds root dir should always have 2 files. One is Version file - // and other is SCM directory. - return false; - } - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java deleted file mode 100644 index fb143a407f7f4..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.utils; - -import com.google.common.base.Preconditions; - -import org.apache.commons.lang.exception.ExceptionUtils; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Class to implement reference counting over instances handed by Container - * Cache. - * Enable DEBUG log below will enable us quickly locate the leaked reference - * from caller stack. When JDK9 StackWalker is available, we can switch to - * StackWalker instead of new Exception().printStackTrace(). 
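Because ReferenceCountedDB implements Closeable and close() only decrements the reference count, handles obtained from ContainerCache are normally used with try-with-resources. A sketch of that pattern; the container ID, DB type string and path below are placeholders, not values from this change:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
    import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;

    public final class ContainerDbHandleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // getDB() increments the reference count; close() at the end of the
        // block decrements it so the LRU eviction in ContainerCache can later
        // clean the handle up once nobody holds it.
        try (ReferenceCountedDB db = ContainerCache.getInstance(conf)
            .getDB(1L, "RocksDB", "/tmp/container-1/metadata/1.db", conf)) {
          System.out.println("reference count while open: " + db.getReferenceCount());
        }
      }
    }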
- */ -public class ReferenceCountedDB implements Closeable { - private static final Logger LOG = - LoggerFactory.getLogger(ReferenceCountedDB.class); - private final AtomicInteger referenceCount; - private final MetadataStore store; - private final String containerDBPath; - - public ReferenceCountedDB(MetadataStore store, String containerDBPath) { - this.referenceCount = new AtomicInteger(0); - this.store = store; - this.containerDBPath = containerDBPath; - } - - public long getReferenceCount() { - return referenceCount.get(); - } - - public void incrementReference() { - this.referenceCount.incrementAndGet(); - if (LOG.isTraceEnabled()) { - LOG.trace("IncRef {} to refCnt {}, stackTrace: {}", containerDBPath, - referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable())); - } - } - - public void decrementReference() { - int refCount = this.referenceCount.decrementAndGet(); - Preconditions.checkArgument(refCount >= 0, "refCount:", refCount); - if (LOG.isTraceEnabled()) { - LOG.trace("DecRef {} to refCnt {}, stackTrace: {}", containerDBPath, - referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable())); - } - } - - public boolean cleanup() { - if (referenceCount.get() == 0 && store != null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Close {} refCnt {}", containerDBPath, - referenceCount.get()); - } - try { - store.close(); - return true; - } catch (Exception e) { - LOG.error("Error closing DB. Container: " + containerDBPath, e); - return false; - } - } else { - return false; - } - } - - public MetadataStore getStore() { - return store; - } - - public void close() { - decrementReference(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java deleted file mode 100644 index 08264f084a031..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.container.common.utils; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java deleted file mode 100644 index c0c719bbc854c..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java +++ /dev/null @@ -1,1298 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * Some portions of this class have been modified to make it functional in this - * package. - */ -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.Beta; -import com.google.common.annotations.GwtCompatible; -import com.google.common.base.Preconditions; -import static com.google.common.base.Preconditions.checkNotNull; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; -import com.google.common.util.concurrent.Uninterruptibles; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater - .newUpdater; - -import javax.annotation.Nullable; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; -import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; -import java.util.concurrent.locks.LockSupport; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * An abstract implementation of {@link ListenableFuture}, intended for - * advanced users only. More common ways to create a {@code ListenableFuture} - * include instantiating a {@link SettableFuture}, submitting a task to a - * {@link ListeningExecutorService}, and deriving a {@code Future} from an - * existing one, typically using methods like {@link Futures#transform - * (ListenableFuture, com.google.common.base.Function) Futures.transform} - * and its overloaded versions. - *

- *
This class implements all methods in {@code ListenableFuture}. - * Subclasses should provide a way to set the result of the computation - * through the protected methods {@link #set(Object)}, - * {@link #setFuture(ListenableFuture)} and {@link #setException(Throwable)}. - * Subclasses may also override {@link #interruptTask()}, which will be - * invoked automatically if a call to {@link #cancel(boolean) cancel(true)} - * succeeds in canceling the future. Subclasses should rarely override other - * methods. - */ - -@GwtCompatible(emulated = true) -public abstract class AbstractFuture implements ListenableFuture { - // NOTE: Whenever both tests are cheap and functional, it's faster to use &, - // | instead of &&, || - - private static final boolean GENERATE_CANCELLATION_CAUSES = - Boolean.parseBoolean( - System.getProperty("guava.concurrent.generate_cancellation_cause", - "false")); - - /** - * A less abstract subclass of AbstractFuture. This can be used to optimize - * setFuture by ensuring that {@link #get} calls exactly the implementation - * of {@link AbstractFuture#get}. - */ - abstract static class TrustedFuture extends AbstractFuture { - @Override - public final V get() throws InterruptedException, ExecutionException { - return super.get(); - } - - @Override - public final V get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - return super.get(timeout, unit); - } - - @Override - public final boolean isDone() { - return super.isDone(); - } - - @Override - public final boolean isCancelled() { - return super.isCancelled(); - } - - @Override - public final void addListener(Runnable listener, Executor executor) { - super.addListener(listener, executor); - } - - @Override - public final boolean cancel(boolean mayInterruptIfRunning) { - return super.cancel(mayInterruptIfRunning); - } - } - - // Logger to log exceptions caught when running listeners. - private static final Logger LOG = Logger - .getLogger(AbstractFuture.class.getName()); - - // A heuristic for timed gets. If the remaining timeout is less than this, - // spin instead of - // blocking. This value is what AbstractQueuedSynchronizer uses. - private static final long SPIN_THRESHOLD_NANOS = 1000L; - - private static final AtomicHelper ATOMIC_HELPER; - - static { - AtomicHelper helper; - - try { - helper = new UnsafeAtomicHelper(); - } catch (Throwable unsafeFailure) { - // catch absolutely everything and fall through to our 'SafeAtomicHelper' - // The access control checks that ARFU does means the caller class has - // to be AbstractFuture - // instead of SafeAtomicHelper, so we annoyingly define these here - try { - helper = - new SafeAtomicHelper( - newUpdater(Waiter.class, Thread.class, "thread"), - newUpdater(Waiter.class, Waiter.class, "next"), - newUpdater(AbstractFuture.class, Waiter.class, "waiters"), - newUpdater(AbstractFuture.class, Listener.class, "listeners"), - newUpdater(AbstractFuture.class, Object.class, "value")); - } catch (Throwable atomicReferenceFieldUpdaterFailure) { - // Some Android 5.0.x Samsung devices have bugs in JDK reflection APIs - // that cause getDeclaredField to throw a NoSuchFieldException when - // the field is definitely there. - // For these users fallback to a suboptimal implementation, based on - // synchronized. This will be a definite performance hit to those users. 
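The fallback chain described in the comment above ends at AtomicReferenceFieldUpdater before dropping to the synchronized helper. For reference only (this is not part of the removed class), a reflective field updater is created and used like this:

    import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

    final class Node {
      // The updated field must be volatile and is resolved reflectively by name,
      // which is why broken reflection forces the synchronized fallback above.
      volatile Node next;

      private static final AtomicReferenceFieldUpdater<Node, Node> NEXT_UPDATER =
          AtomicReferenceFieldUpdater.newUpdater(Node.class, Node.class, "next");

      boolean casNext(Node expect, Node update) {
        return NEXT_UPDATER.compareAndSet(this, expect, update);
      }
    }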
- LOG.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure); - LOG.log( - Level.SEVERE, "SafeAtomicHelper is broken!", - atomicReferenceFieldUpdaterFailure); - helper = new SynchronizedHelper(); - } - } - ATOMIC_HELPER = helper; - - // Prevent rare disastrous classloading in first call to LockSupport.park. - // See: https://bugs.openjdk.java.net/browse/JDK-8074773 - @SuppressWarnings("unused") - @SuppressFBWarnings - Class ensureLoaded = LockSupport.class; - } - - /** - * Waiter links form a Treiber stack, in the {@link #waiters} field. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Waiter { - static final Waiter TOMBSTONE = new Waiter(false /* ignored param */); - - @Nullable volatile Thread thread; - @Nullable volatile Waiter next; - - /** - * Constructor for the TOMBSTONE, avoids use of ATOMIC_HELPER in case this - * class is loaded before the ATOMIC_HELPER. Apparently this is possible - * on some android platforms. - */ - Waiter(boolean unused) { - } - - Waiter() { - // avoid volatile write, write is made visible by subsequent CAS on - // waiters field - ATOMIC_HELPER.putThread(this, Thread.currentThread()); - } - - // non-volatile write to the next field. Should be made visible by - // subsequent CAS on waiters field. - void setNext(Waiter next) { - ATOMIC_HELPER.putNext(this, next); - } - - void unpark() { - // This is racy with removeWaiter. The consequence of the race is that - // we may spuriously call unpark even though the thread has already - // removed itself from the list. But even if we did use a CAS, that - // race would still exist (it would just be ever so slightly smaller). - Thread w = thread; - if (w != null) { - thread = null; - LockSupport.unpark(w); - } - } - } - - /** - * Marks the given node as 'deleted' (null waiter) and then scans the list - * to unlink all deleted nodes. This is an O(n) operation in the common - * case (and O(n^2) in the worst), but we are saved by two things. - *

    - *
  • This is only called when a waiting thread times out or is interrupted. Both of which should be rare.
  • The waiters list should be very short.
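The waiters field that removeWaiter() unlinks from is a Treiber stack: new nodes are pushed with a CAS on the head and dead nodes are skipped lazily. A generic, self-contained version of that push/pop pattern, shown purely as an illustration of the structure (not code from this change):

    import java.util.concurrent.atomic.AtomicReference;

    final class TreiberStack<T> {
      private static final class Node<T> {
        final T item;
        Node<T> next;
        Node(T item) { this.item = item; }
      }

      private final AtomicReference<Node<T>> head = new AtomicReference<>();

      void push(T item) {
        Node<T> node = new Node<>(item);
        Node<T> oldHead;
        do {
          oldHead = head.get();
          node.next = oldHead;                        // link before publishing
        } while (!head.compareAndSet(oldHead, node)); // retry on contention
      }

      T pop() {
        Node<T> oldHead;
        do {
          oldHead = head.get();
          if (oldHead == null) {
            return null;                              // empty stack
          }
        } while (!head.compareAndSet(oldHead, oldHead.next));
        return oldHead.item;
      }
    }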
- */ - private void removeWaiter(Waiter node) { - node.thread = null; // mark as 'deleted' - restart: - while (true) { - Waiter pred = null; - Waiter curr = waiters; - if (curr == Waiter.TOMBSTONE) { - return; // give up if someone is calling complete - } - Waiter succ; - while (curr != null) { - succ = curr.next; - if (curr.thread != null) { // we aren't unlinking this node, update - // pred. - pred = curr; - } else if (pred != null) { // We are unlinking this node and it has a - // predecessor. - pred.next = succ; - if (pred.thread == null) { // We raced with another node that - // unlinked pred. Restart. - continue restart; - } - } else if (!ATOMIC_HELPER - .casWaiters(this, curr, succ)) { // We are unlinking head - continue restart; // We raced with an add or complete - } - curr = succ; - } - break; - } - } - - /** - * Listeners also form a stack through the {@link #listeners} field. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Listener { - static final Listener TOMBSTONE = new Listener(null, null); - final Runnable task; - final Executor executor; - - // writes to next are made visible by subsequent CAS's on the listeners - // field - @Nullable Listener next; - - Listener(Runnable task, Executor executor) { - this.task = task; - this.executor = executor; - } - } - - /** - * A special value to represent {@code null}. - */ - private static final Object NULL = new Object(); - - /** - * A special value to represent failure, when {@link #setException} is - * called successfully. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Failure { - static final Failure FALLBACK_INSTANCE = - new Failure( - new Throwable("Failure occurred while trying to finish a future" + - ".") { - @Override - public synchronized Throwable fillInStackTrace() { - return this; // no stack trace - } - }); - final Throwable exception; - - Failure(Throwable exception) { - this.exception = checkNotNull(exception); - } - } - - /** - * A special value to represent cancellation and the 'wasInterrupted' bit. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Cancellation { - final boolean wasInterrupted; - @Nullable final Throwable cause; - - Cancellation(boolean wasInterrupted, @Nullable Throwable cause) { - this.wasInterrupted = wasInterrupted; - this.cause = cause; - } - } - - /** - * A special value that encodes the 'setFuture' state. - */ - @SuppressWarnings("visibilitymodifier") - private static final class SetFuture implements Runnable { - final AbstractFuture owner; - final ListenableFuture future; - - SetFuture(AbstractFuture owner, ListenableFuture future) { - this.owner = owner; - this.future = future; - } - - @Override - public void run() { - if (owner.value != this) { - // nothing to do, we must have been cancelled, don't bother inspecting - // the future. - return; - } - Object valueToSet = getFutureValue(future); - if (ATOMIC_HELPER.casValue(owner, this, valueToSet)) { - complete(owner); - } - } - } - - /** - * This field encodes the current state of the future. - *

- *

 The valid values are:
  • {@code null} initial state, nothing has happened.
  • {@link Cancellation} terminal state, {@code cancel} was called.
  • {@link Failure} terminal state, {@code setException} was called.
  • {@link SetFuture} intermediate state, {@code setFuture} was called.
  • {@link #NULL} terminal state, {@code set(null)} was called.
  • Any other non-null value, terminal state, {@code set} was called with a non-null argument.
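Once value reaches a terminal state it can never be overwritten; only completion or an earlier cancel wins. Guava's public SettableFuture, which extends the upstream AbstractFuture this file was copied from, makes the effect easy to observe (assuming Guava on the classpath):

    import com.google.common.util.concurrent.SettableFuture;

    public final class TerminalStateDemo {
      public static void main(String[] args) {
        SettableFuture<String> future = SettableFuture.create();
        System.out.println(future.set("first"));   // true: null -> terminal value
        System.out.println(future.set("second"));  // false: terminal states are sticky
        System.out.println(future.cancel(true));   // false: already completed
      }
    }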
- */ - private volatile Object value; - - /** - * All listeners. - */ - private volatile Listener listeners; - - /** - * All waiting threads. - */ - private volatile Waiter waiters; - - /** - * Constructor for use by subclasses. - */ - protected AbstractFuture() { - } - - // Gets and Timed Gets - // - // * Be responsive to interruption - // * Don't create Waiter nodes if you aren't going to park, this helps - // reduce contention on the waiters field. - // * Future completion is defined by when #value becomes non-null/non - // SetFuture - // * Future completion can be observed if the waiters field contains a - // TOMBSTONE - - // Timed Get - // There are a few design constraints to consider - // * We want to be responsive to small timeouts, unpark() has non trivial - // latency overheads (I have observed 12 micros on 64 bit linux systems to - // wake up a parked thread). So if the timeout is small we shouldn't park(). - // This needs to be traded off with the cpu overhead of spinning, so we use - // SPIN_THRESHOLD_NANOS which is what AbstractQueuedSynchronizer uses for - // similar purposes. - // * We want to behave reasonably for timeouts of 0 - // * We are more responsive to completion than timeouts. This is because - // parkNanos depends on system scheduling and as such we could either miss - // our deadline, or unpark() could be delayed so that it looks like we - // timed out even though we didn't. For comparison FutureTask respects - // completion preferably and AQS is non-deterministic (depends on where in - // the queue the waiter is). If we wanted to be strict about it, we could - // store the unpark() time in the Waiter node and we could use that to make - // a decision about whether or not we timed out prior to being unparked. - - /* - * Improve the documentation of when InterruptedException is thrown. Our - * behavior matches the JDK's, but the JDK's documentation is misleading. - */ - - /** - * {@inheritDoc} - *

- *

The default {@link AbstractFuture} implementation throws {@code - * InterruptedException} if the current thread is interrupted before or - * during the call, even if the value is already available. - * - * @throws InterruptedException if the current thread was interrupted - * before or during the call - * (optional but recommended). - * @throws CancellationException {@inheritDoc} - */ - @Override - public V get(long timeout, TimeUnit unit) - throws InterruptedException, TimeoutException, ExecutionException { - // NOTE: if timeout < 0, remainingNanos will be < 0 and we will fall into - // the while(true) loop at the bottom and throw a timeoutexception. - long remainingNanos = unit - .toNanos(timeout); // we rely on the implicit null check on unit. - if (Thread.interrupted()) { - throw new InterruptedException(); - } - Object localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - // we delay calling nanoTime until we know we will need to either park or - // spin - final long endNanos = remainingNanos > 0 ? System - .nanoTime() + remainingNanos : 0; - long_wait_loop: - if (remainingNanos >= SPIN_THRESHOLD_NANOS) { - Waiter oldHead = waiters; - if (oldHead != Waiter.TOMBSTONE) { - Waiter node = new Waiter(); - do { - node.setNext(oldHead); - if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) { - while (true) { - LockSupport.parkNanos(this, remainingNanos); - // Check interruption first, if we woke up due to interruption - // we need to honor that. - if (Thread.interrupted()) { - removeWaiter(node); - throw new InterruptedException(); - } - - // Otherwise re-read and check doneness. If we loop then it must - // have been a spurious wakeup - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - - // timed out? - remainingNanos = endNanos - System.nanoTime(); - if (remainingNanos < SPIN_THRESHOLD_NANOS) { - // Remove the waiter, one way or another we are done parking - // this thread. - removeWaiter(node); - break long_wait_loop; // jump down to the busy wait loop - } - } - } - oldHead = waiters; // re-read and loop. - } while (oldHead != Waiter.TOMBSTONE); - } - // re-read value, if we get here then we must have observed a TOMBSTONE - // while trying to add a waiter. - return getDoneValue(value); - } - // If we get here then we have remainingNanos < SPIN_THRESHOLD_NANOS and - // there is no node on the waiters list - while (remainingNanos > 0) { - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - if (Thread.interrupted()) { - throw new InterruptedException(); - } - remainingNanos = endNanos - System.nanoTime(); - } - throw new TimeoutException(); - } - - /* - * Improve the documentation of when InterruptedException is thrown. Our - * behavior matches the JDK's, but the JDK's documentation is misleading. - */ - - /** - * {@inheritDoc} - *

- *

The default {@link AbstractFuture} implementation throws {@code - * InterruptedException} if the current thread is interrupted before or - * during the call, even if the value is already available. - * - * @throws InterruptedException if the current thread was interrupted - * before or during the call - * (optional but recommended). - * @throws CancellationException {@inheritDoc} - */ - @Override - public V get() throws InterruptedException, ExecutionException { - if (Thread.interrupted()) { - throw new InterruptedException(); - } - Object localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - Waiter oldHead = waiters; - if (oldHead != Waiter.TOMBSTONE) { - Waiter node = new Waiter(); - do { - node.setNext(oldHead); - if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) { - // we are on the stack, now wait for completion. - while (true) { - LockSupport.park(this); - // Check interruption first, if we woke up due to interruption we - // need to honor that. - if (Thread.interrupted()) { - removeWaiter(node); - throw new InterruptedException(); - } - // Otherwise re-read and check doneness. If we loop then it must - // have been a spurious wakeup - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - } - } - oldHead = waiters; // re-read and loop. - } while (oldHead != Waiter.TOMBSTONE); - } - // re-read value, if we get here then we must have observed a TOMBSTONE - // while trying to add a waiter. - return getDoneValue(value); - } - - /** - * Unboxes {@code obj}. Assumes that obj is not {@code null} or a - * {@link SetFuture}. - */ - private V getDoneValue(Object obj) throws ExecutionException { - // While this seems like it might be too branch-y, simple benchmarking - // proves it to be unmeasurable (comparing done AbstractFutures with - // immediateFuture) - if (obj instanceof Cancellation) { - throw cancellationExceptionWithCause( - "Task was cancelled.", ((Cancellation) obj).cause); - } else if (obj instanceof Failure) { - throw new ExecutionException(((Failure) obj).exception); - } else if (obj == NULL) { - return null; - } else { - @SuppressWarnings("unchecked") // this is the only other option - V asV = (V) obj; - return asV; - } - } - - @Override - public boolean isDone() { - final Object localValue = value; - return localValue != null & !(localValue instanceof SetFuture); - } - - @Override - public boolean isCancelled() { - final Object localValue = value; - return localValue instanceof Cancellation; - } - - /** - * {@inheritDoc} - *

- *

If a cancellation attempt succeeds on a {@code Future} that had - * previously been {@linkplain#setFuture set asynchronously}, then the - * cancellation will also be propagated to the delegate {@code Future} that - * was supplied in the {@code setFuture} call. - */ - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - Object localValue = value; - boolean rValue = false; - if (localValue == null | localValue instanceof SetFuture) { - // Try to delay allocating the exception. At this point we may still - // lose the CAS, but it is certainly less likely. - Throwable cause = - GENERATE_CANCELLATION_CAUSES - ? new CancellationException("Future.cancel() was called.") - : null; - Object valueToSet = new Cancellation(mayInterruptIfRunning, cause); - AbstractFuture abstractFuture = this; - while (true) { - if (ATOMIC_HELPER.casValue(abstractFuture, localValue, valueToSet)) { - rValue = true; - // We call interuptTask before calling complete(), which is - // consistent with FutureTask - if (mayInterruptIfRunning) { - abstractFuture.interruptTask(); - } - complete(abstractFuture); - if (localValue instanceof SetFuture) { - // propagate cancellation to the future set in setfuture, this is - // racy, and we don't care if we are successful or not. - ListenableFuture futureToPropagateTo = ((SetFuture) localValue) - .future; - if (futureToPropagateTo instanceof TrustedFuture) { - // If the future is a TrustedFuture then we specifically avoid - // calling cancel() this has 2 benefits - // 1. for long chains of futures strung together with setFuture - // we consume less stack - // 2. we avoid allocating Cancellation objects at every level of - // the cancellation chain - // We can only do this for TrustedFuture, because - // TrustedFuture.cancel is final and does nothing but delegate - // to this method. - AbstractFuture trusted = (AbstractFuture) - futureToPropagateTo; - localValue = trusted.value; - if (localValue == null | localValue instanceof SetFuture) { - abstractFuture = trusted; - continue; // loop back up and try to complete the new future - } - } else { - // not a TrustedFuture, call cancel directly. - futureToPropagateTo.cancel(mayInterruptIfRunning); - } - } - break; - } - // obj changed, reread - localValue = abstractFuture.value; - if (!(localValue instanceof SetFuture)) { - // obj cannot be null at this point, because value can only change - // from null to non-null. So if value changed (and it did since we - // lost the CAS), then it cannot be null and since it isn't a - // SetFuture, then the future must be done and we should exit the loop - break; - } - } - } - return rValue; - } - - /** - * Subclasses can override this method to implement interruption of the - * future's computation. The method is invoked automatically by a - * successful call to {@link #cancel(boolean) cancel(true)}. - *

- *

The default implementation does nothing. - * - * @since 10.0 - */ - protected void interruptTask() { - } - - /** - * Returns true if this future was cancelled with {@code - * mayInterruptIfRunning} set to {@code true}. - * - * @since 14.0 - */ - protected final boolean wasInterrupted() { - final Object localValue = value; - return (localValue instanceof Cancellation) && ((Cancellation) localValue) - .wasInterrupted; - } - - /** - * {@inheritDoc} - * - * @since 10.0 - */ - @Override - public void addListener(Runnable listener, Executor executor) { - checkNotNull(listener, "Runnable was null."); - checkNotNull(executor, "Executor was null."); - Listener oldHead = listeners; - if (oldHead != Listener.TOMBSTONE) { - Listener newNode = new Listener(listener, executor); - do { - newNode.next = oldHead; - if (ATOMIC_HELPER.casListeners(this, oldHead, newNode)) { - return; - } - oldHead = listeners; // re-read - } while (oldHead != Listener.TOMBSTONE); - } - // If we get here then the Listener TOMBSTONE was set, which means the - // future is done, call the listener. - executeListener(listener, executor); - } - - /** - * Sets the result of this {@code Future} unless this {@code Future} has - * already been cancelled or set (including - * {@linkplain #setFuture set asynchronously}). When a call to this method - * returns, the {@code Future} is guaranteed to be - * {@linkplain #isDone done} only if the call was accepted (in which - * case it returns {@code true}). If it returns {@code false}, the {@code - * Future} may have previously been set asynchronously, in which case its - * result may not be known yet. That result, though not yet known, cannot - * be overridden by a call to a {@code set*} method, only by a call to - * {@link #cancel}. - * - * @param value the value to be used as the result - * @return true if the attempt was accepted, completing the {@code Future} - */ - protected boolean set(@Nullable V val) { - Object valueToSet = value == null ? NULL : val; - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - complete(this); - return true; - } - return false; - } - - /** - * Sets the failed result of this {@code Future} unless this {@code Future} - * has already been cancelled or set (including - * {@linkplain #setFuture set asynchronously}). When a call to this method - * returns, the {@code Future} is guaranteed to be - * {@linkplain #isDone done} only if the call was accepted (in which - * case it returns {@code true}). If it returns {@code false}, the - * {@code Future} may have previously been set asynchronously, in which case - * its result may not be known yet. That result, though not yet known, - * cannot be overridden by a call to a {@code set*} method, only by a call - * to {@link #cancel}. - * - * @param throwable the exception to be used as the failed result - * @return true if the attempt was accepted, completing the {@code Future} - */ - protected boolean setException(Throwable throwable) { - Object valueToSet = new Failure(checkNotNull(throwable)); - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - complete(this); - return true; - } - return false; - } - - /** - * Sets the result of this {@code Future} to match the supplied input - * {@code Future} once the supplied {@code Future} is done, unless this - * {@code Future} has already been cancelled or set (including "set - * asynchronously," defined below). - *

- *

If the supplied future is {@linkplain #isDone done} when this method - * is called and the call is accepted, then this future is guaranteed to - * have been completed with the supplied future by the time this method - * returns. If the supplied future is not done and the call is accepted, then - * the future will be set asynchronously. Note that such a result, - * though not yet known, cannot be overridden by a call to a {@code set*} - * method, only by a call to {@link #cancel}. - *

- *

If the call {@code setFuture(delegate)} is accepted and this {@code - * Future} is later cancelled, cancellation will be propagated to {@code - * delegate}. Additionally, any call to {@code setFuture} after any - * cancellation will propagate cancellation to the supplied {@code Future}. - * - * @param future the future to delegate to - * @return true if the attempt was accepted, indicating that the {@code - * Future} was not previously cancelled or set. - * @since 19.0 - */ - @Beta - @SuppressWarnings("deadstore") - protected boolean setFuture(ListenableFuture future) { - checkNotNull(future); - Object localValue = value; - if (localValue == null) { - if (future.isDone()) { - Object val = getFutureValue(future); - if (ATOMIC_HELPER.casValue(this, null, val)) { - complete(this); - return true; - } - return false; - } - SetFuture valueToSet = new SetFuture(this, future); - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - // the listener is responsible for calling completeWithFuture, - // directExecutor is appropriate since all we are doing is unpacking - // a completed future which should be fast. - try { - future.addListener(valueToSet, directExecutor()); - } catch (Throwable t) { - // addListener has thrown an exception! SetFuture.run can't throw - // any exceptions so this must have been caused by addListener - // itself. The most likely explanation is a misconfigured mock. Try - // to switch to Failure. - Failure failure; - try { - failure = new Failure(t); - } catch (Throwable oomMostLikely) { - failure = Failure.FALLBACK_INSTANCE; - } - // Note: The only way this CAS could fail is if cancel() has raced - // with us. That is ok. - boolean unused = ATOMIC_HELPER.casValue(this, valueToSet, failure); - } - return true; - } - localValue = value; // we lost the cas, fall through and maybe cancel - } - // The future has already been set to something. If it is cancellation we - // should cancel the incoming future. - if (localValue instanceof Cancellation) { - // we don't care if it fails, this is best-effort. - future.cancel(((Cancellation) localValue).wasInterrupted); - } - return false; - } - - /** - * Returns a value, suitable for storing in the {@link #value} field. From - * the given future, which is assumed to be done. - *

- *

This is approximately the inverse of {@link #getDoneValue(Object)} - */ - private static Object getFutureValue(ListenableFuture future) { - Object valueToSet; - if (future instanceof TrustedFuture) { - // Break encapsulation for TrustedFuture instances since we know that - // subclasses cannot override .get() (since it is final) and therefore - // this is equivalent to calling .get() and unpacking the exceptions - // like we do below (just much faster because it is a single field read - // instead of a read, several branches and possibly creating exceptions). - return ((AbstractFuture) future).value; - } else { - // Otherwise calculate valueToSet by calling .get() - try { - Object v = getDone(future); - valueToSet = v == null ? NULL : v; - } catch (ExecutionException exception) { - valueToSet = new Failure(exception.getCause()); - } catch (CancellationException cancellation) { - valueToSet = new Cancellation(false, cancellation); - } catch (Throwable t) { - valueToSet = new Failure(t); - } - } - return valueToSet; - } - - /** - * Unblocks all threads and runs all listeners. - */ - private static void complete(AbstractFuture future) { - Listener next = null; - outer: - while (true) { - future.releaseWaiters(); - // We call this before the listeners in order to avoid needing to manage - // a separate stack data structure for them. afterDone() should be - // generally fast and only used for cleanup work... but in theory can - // also be recursive and create StackOverflowErrors - future.afterDone(); - // push the current set of listeners onto next - next = future.clearListeners(next); - future = null; - while (next != null) { - Listener curr = next; - next = next.next; - Runnable task = curr.task; - if (task instanceof SetFuture) { - SetFuture setFuture = (SetFuture) task; - // We unwind setFuture specifically to avoid StackOverflowErrors in - // the case of long chains of SetFutures - // Handling this special case is important because there is no way - // to pass an executor to setFuture, so a user couldn't break the - // chain by doing this themselves. It is also potentially common - // if someone writes a recursive Futures.transformAsync transformer. - future = setFuture.owner; - if (future.value == setFuture) { - Object valueToSet = getFutureValue(setFuture.future); - if (ATOMIC_HELPER.casValue(future, setFuture, valueToSet)) { - continue outer; - } - } - // other wise the future we were trying to set is already done. - } else { - executeListener(task, curr.executor); - } - } - break; - } - } - - public static V getDone(Future future) throws ExecutionException { - /* - * We throw IllegalStateException, since the call could succeed later. - * Perhaps we "should" throw IllegalArgumentException, since the call - * could succeed with a different argument. Those exceptions' docs - * suggest that either is acceptable. Google's Java Practices page - * recommends IllegalArgumentException here, in part to keep its - * recommendation simple: Static methods should throw - * IllegalStateException only when they use static state. - * - * - * Why do we deviate here? The answer: We want for fluentFuture.getDone() - * to throw the same exception as Futures.getDone(fluentFuture). - */ - Preconditions.checkState(future.isDone(), "Future was expected to be " + - "done:" + - " %s", future); - return Uninterruptibles.getUninterruptibly(future); - } - - /** - * Callback method that is called exactly once after the future is completed. - *

- *

If {@link #interruptTask} is also run during completion, - * {@link #afterDone} runs after it. - *

- *

The default implementation of this method in {@code AbstractFuture} - * does nothing. This is intended for very lightweight cleanup work, for - * example, timing statistics or clearing fields. - * If your task does anything heavier consider, just using a listener with - * an executor. - * - * @since 20.0 - */ - @Beta - protected void afterDone() { - } - - /** - * If this future has been cancelled (and possibly interrupted), cancels - * (and possibly interrupts) the given future (if available). - *

- *

This method should be used only when this future is completed. It is - * designed to be called from {@code done}. - */ - final void maybePropagateCancellation(@Nullable Future related) { - if (related != null & isCancelled()) { - related.cancel(wasInterrupted()); - } - } - - /** - * Releases all threads in the {@link #waiters} list, and clears the list. - */ - private void releaseWaiters() { - Waiter head; - do { - head = waiters; - } while (!ATOMIC_HELPER.casWaiters(this, head, Waiter.TOMBSTONE)); - for (Waiter currentWaiter = head; - currentWaiter != null; currentWaiter = currentWaiter.next) { - currentWaiter.unpark(); - } - } - - /** - * Clears the {@link #listeners} list and prepends its contents to {@code - * onto}, least recently added first. - */ - private Listener clearListeners(Listener onto) { - // We need to - // 1. atomically swap the listeners with TOMBSTONE, this is because - // addListener uses that to to synchronize with us - // 2. reverse the linked list, because despite our rather clear contract, - // people depend on us executing listeners in the order they were added - // 3. push all the items onto 'onto' and return the new head of the stack - Listener head; - do { - head = listeners; - } while (!ATOMIC_HELPER.casListeners(this, head, Listener.TOMBSTONE)); - Listener reversedList = onto; - while (head != null) { - Listener tmp = head; - head = head.next; - tmp.next = reversedList; - reversedList = tmp; - } - return reversedList; - } - - /** - * Submits the given runnable to the given {@link Executor} catching and - * logging all {@linkplain RuntimeException runtime exceptions} thrown by - * the executor. - */ - private static void executeListener(Runnable runnable, Executor executor) { - try { - executor.execute(runnable); - } catch (RuntimeException e) { - // Log it and keep going -- bad runnable and/or executor. Don't punish - // the other runnables if we're given a bad one. We only catch - // RuntimeException because we want Errors to propagate up. - LOG.log( - Level.SEVERE, - "RuntimeException while executing runnable " + runnable + " with " + - "executor " + executor, - e); - } - } - - private abstract static class AtomicHelper { - /** - * Non volatile write of the thread to the {@link Waiter#thread} field. - */ - abstract void putThread(Waiter waiter, Thread newValue); - - /** - * Non volatile write of the waiter to the {@link Waiter#next} field. - */ - abstract void putNext(Waiter waiter, Waiter newValue); - - /** - * Performs a CAS operation on the {@link #waiters} field. - */ - abstract boolean casWaiters( - AbstractFuture future, Waiter expect, - Waiter update); - - /** - * Performs a CAS operation on the {@link #listeners} field. - */ - abstract boolean casListeners( - AbstractFuture future, Listener expect, - Listener update); - - /** - * Performs a CAS operation on the {@link #value} field. - */ - abstract boolean casValue( - AbstractFuture future, Object expect, Object update); - } - - /** - * {@link AtomicHelper} based on {@link sun.misc.Unsafe}. - *

- *

Static initialization of this class will fail if the - * {@link sun.misc.Unsafe} object cannot be accessed. - */ - private static final class UnsafeAtomicHelper extends AtomicHelper { - static final sun.misc.Unsafe UNSAFE; - static final long LISTENERS_OFFSET; - static final long WAITERS_OFFSET; - static final long VALUE_OFFSET; - static final long WAITER_THREAD_OFFSET; - static final long WAITER_NEXT_OFFSET; - - static { - sun.misc.Unsafe unsafe = null; - try { - unsafe = sun.misc.Unsafe.getUnsafe(); - } catch (SecurityException tryReflectionInstead) { - try { - unsafe = - AccessController.doPrivileged( - new PrivilegedExceptionAction() { - @Override - public sun.misc.Unsafe run() throws Exception { - Class k = sun.misc.Unsafe.class; - for (java.lang.reflect.Field f : k.getDeclaredFields()) { - f.setAccessible(true); - Object x = f.get(null); - if (k.isInstance(x)) { - return k.cast(x); - } - } - throw new NoSuchFieldError("the Unsafe"); - } - }); - } catch (PrivilegedActionException e) { - throw new RuntimeException( - "Could not initialize intrinsics", e.getCause()); - } - } - try { - Class abstractFuture = AbstractFuture.class; - WAITERS_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("waiters")); - LISTENERS_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("listeners")); - VALUE_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("value")); - WAITER_THREAD_OFFSET = unsafe - .objectFieldOffset(Waiter.class.getDeclaredField("thread")); - WAITER_NEXT_OFFSET = unsafe - .objectFieldOffset(Waiter.class.getDeclaredField("next")); - UNSAFE = unsafe; - } catch (Exception e) { - throwIfUnchecked(e); - throw new RuntimeException(e); - } - } - - public static void throwIfUnchecked(Throwable throwable) { - checkNotNull(throwable); - if (throwable instanceof RuntimeException) { - throw (RuntimeException) throwable; - } - if (throwable instanceof Error) { - throw (Error) throwable; - } - } - - @Override - void putThread(Waiter waiter, Thread newValue) { - UNSAFE.putObject(waiter, WAITER_THREAD_OFFSET, newValue); - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - UNSAFE.putObject(waiter, WAITER_NEXT_OFFSET, newValue); - } - - /** - * Performs a CAS operation on the {@link #waiters} field. - */ - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - return UNSAFE - .compareAndSwapObject(future, WAITERS_OFFSET, expect, update); - } - - /** - * Performs a CAS operation on the {@link #listeners} field. - */ - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - return UNSAFE - .compareAndSwapObject(future, LISTENERS_OFFSET, expect, update); - } - - /** - * Performs a CAS operation on the {@link #value} field. - */ - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - return UNSAFE.compareAndSwapObject(future, VALUE_OFFSET, expect, update); - } - } - - /** - * {@link AtomicHelper} based on {@link AtomicReferenceFieldUpdater}. 
- */ - @SuppressWarnings("visibilitymodifier") - private static final class SafeAtomicHelper extends AtomicHelper { - final AtomicReferenceFieldUpdater waiterThreadUpdater; - final AtomicReferenceFieldUpdater waiterNextUpdater; - final AtomicReferenceFieldUpdater waitersUpdater; - final AtomicReferenceFieldUpdater - listenersUpdater; - final AtomicReferenceFieldUpdater valueUpdater; - - SafeAtomicHelper( - AtomicReferenceFieldUpdater waiterThreadUpdater, - AtomicReferenceFieldUpdater waiterNextUpdater, - AtomicReferenceFieldUpdater waitersUpdater, - AtomicReferenceFieldUpdater listenersUpdater, - AtomicReferenceFieldUpdater valueUpdater) { - this.waiterThreadUpdater = waiterThreadUpdater; - this.waiterNextUpdater = waiterNextUpdater; - this.waitersUpdater = waitersUpdater; - this.listenersUpdater = listenersUpdater; - this.valueUpdater = valueUpdater; - } - - @Override - void putThread(Waiter waiter, Thread newValue) { - waiterThreadUpdater.lazySet(waiter, newValue); - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - waiterNextUpdater.lazySet(waiter, newValue); - } - - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - return waitersUpdater.compareAndSet(future, expect, update); - } - - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - return listenersUpdater.compareAndSet(future, expect, update); - } - - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - return valueUpdater.compareAndSet(future, expect, update); - } - } - - /** - * {@link AtomicHelper} based on {@code synchronized} and volatile writes. - *

- *

This is an implementation of last resort for when certain basic VM - * features are broken (like AtomicReferenceFieldUpdater). - */ - private static final class SynchronizedHelper extends AtomicHelper { - @Override - void putThread(Waiter waiter, Thread newValue) { - waiter.thread = newValue; - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - waiter.next = newValue; - } - - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - synchronized (future) { - if (future.waiters == expect) { - future.waiters = update; - return true; - } - return false; - } - } - - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - synchronized (future) { - if (future.listeners == expect) { - future.listeners = update; - return true; - } - return false; - } - } - - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - synchronized (future) { - if (future.value == expect) { - future.value = update; - return true; - } - return false; - } - } - } - - private static CancellationException cancellationExceptionWithCause( - @Nullable String message, @Nullable Throwable cause) { - CancellationException exception = new CancellationException(message); - exception.initCause(cause); - return exception; - } - - /** - * Returns an {@link Executor} that runs each task in the thread that invokes - * {@link Executor#execute execute}, as in {@link CallerRunsPolicy}. - *

- *

 This instance is equivalent to:
   {@code
-   *   final class DirectExecutor implements Executor {
-   *     public void execute(Runnable r) {
-   *       r.run();
-   *     }
-   *   }}
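A standalone equivalent of the executor shown in the snippet above; it simply runs each task on whichever thread calls execute(), with no hand-off to another thread:

    import java.util.concurrent.Executor;

    public final class DirectExecutorDemo {
      public static void main(String[] args) {
        Executor direct = Runnable::run;  // same behavior as directExecutor()
        direct.execute(() ->
            System.out.println("executed on " + Thread.currentThread().getName()));
        // Prints "executed on main": the listener runs on the calling thread.
      }
    }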
- */ - public static Executor directExecutor() { - return DirectExecutor.INSTANCE; - } - - /** - * See {@link #directExecutor} for behavioral notes. - */ - private enum DirectExecutor implements Executor { - INSTANCE; - - @Override - public void execute(Runnable command) { - command.run(); - } - - @Override - public String toString() { - return "MoreExecutors.directExecutor()"; - } - } - -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AsyncChecker.java deleted file mode 100644 index f7391e3cca00d..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AsyncChecker.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.util.concurrent.ListenableFuture; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; - -import java.util.Optional; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; - -/** - * A class that can be used to schedule an asynchronous check on a given - * {@link Checkable}. If the check is successfully scheduled then a - * {@link ListenableFuture} is returned. - * - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public interface AsyncChecker { - - /** - * Schedule an asynchronous check for the given object. - * - * @param target object to be checked. - * - * @param context the interpretation of the context depends on the - * target. - * - * @return returns a {@link Optional of ListenableFuture} that can be used to - * retrieve the result of the asynchronous check. - */ - Optional> schedule(Checkable target, K context); - - /** - * Cancel all executing checks and wait for them to complete. - * First attempts a graceful cancellation, then cancels forcefully. - * Waits for the supplied timeout after both attempts. - * - * See {@link ExecutorService#awaitTermination} for a description of - * the parameters. - * - * @throws InterruptedException - */ - void shutdownAndWait(long timeout, TimeUnit timeUnit) - throws InterruptedException; -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java deleted file mode 100644 index 3e89f9031389c..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ /dev/null @@ -1,455 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.volume; - -import javax.annotation.Nullable; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; -import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; -import org.apache.hadoop.ozone.common.InconsistentStorageStateException; -import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion; -import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; - -import org.apache.hadoop.util.DiskChecker; -import org.apache.hadoop.util.Time; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - -/** - * HddsVolume represents volume in a datanode. {@link VolumeSet} maintains a - * list of HddsVolumes, one for each volume in the Datanode. - * {@link VolumeInfo} in encompassed by this class. - *

- * The disk layout per volume is as follows: - *
../hdds/VERSION - *
{@literal ../hdds/<>/current/<>/<>/metadata} - *
{@literal ../hdds/<>/current/<>/<>/<>} - *
- * Each hdds volume has its own VERSION file. The hdds volume will have one - * scmUuid directory for each SCM it is a part of (currently only one SCM is - * supported). - * - * During DN startup, if the VERSION file exists, we verify that the - * clusterID in the version file matches the clusterID from SCM. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -@SuppressWarnings("finalclass") -public class HddsVolume - implements Checkable { - - private static final Logger LOG = LoggerFactory.getLogger(HddsVolume.class); - - public static final String HDDS_VOLUME_DIR = "hdds"; - - private final File hddsRootDir; - private final VolumeInfo volumeInfo; - private VolumeState state; - private final VolumeIOStats volumeIOStats; - - // VERSION file properties - private String storageID; // id of the file system - private String clusterID; // id of the cluster - private String datanodeUuid; // id of the DataNode - private long cTime; // creation time of the file system state - private int layoutVersion; // layout version of the storage data - private final AtomicLong committedBytes; // till Open containers become full - - /** - * Run a check on the current volume to determine if it is healthy. - * @param unused context for the check, ignored. - * @return result of checking the volume. - * @throws Exception if an exception was encountered while running - * the volume check. - */ - @Override - public VolumeCheckResult check(@Nullable Boolean unused) throws Exception { - DiskChecker.checkDir(hddsRootDir); - return VolumeCheckResult.HEALTHY; - } - - /** - * Builder for HddsVolume. - */ - public static class Builder { - private final String volumeRootStr; - private Configuration conf; - private StorageType storageType; - private long configuredCapacity; - - private String datanodeUuid; - private String clusterID; - private boolean failedVolume = false; - - public Builder(String rootDirStr) { - this.volumeRootStr = rootDirStr; - } - - public Builder conf(Configuration config) { - this.conf = config; - return this; - } - - public Builder storageType(StorageType st) { - this.storageType = st; - return this; - } - - public Builder configuredCapacity(long capacity) { - this.configuredCapacity = capacity; - return this; - } - - public Builder datanodeUuid(String datanodeUUID) { - this.datanodeUuid = datanodeUUID; - return this; - } - - public Builder clusterID(String cid) { - this.clusterID = cid; - return this; - } - - // This is added just to create failed volume objects, which will be used - // to create failed HddsVolume objects in the case of any exceptions caused - // during creating HddsVolume object. 
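For context, a minimal sketch of how this Builder was typically driven. It is illustrative only: the data directory, cluster ID, and storage type are hypothetical placeholder values, and the snippet only compiles against the tree from which HddsVolume is being removed.

    import java.io.IOException;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

    public class HddsVolumeBuilderSketch {
      public static void main(String[] args) throws IOException {
        // Hypothetical data directory and cluster ID, for illustration only.
        HddsVolume volume = new HddsVolume.Builder("/data/disk1")
            .conf(new Configuration())
            .storageType(StorageType.DISK)
            .datanodeUuid(UUID.randomUUID().toString())
            .clusterID("CID-example")
            .build();   // creates <root>/hdds and, since both IDs are known, a VERSION file
        System.out.println("Volume state: " + volume.getStorageState());
      }
    }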
- public Builder failedVolume(boolean failed) { - this.failedVolume = failed; - return this; - } - - public HddsVolume build() throws IOException { - return new HddsVolume(this); - } - } - - private HddsVolume(Builder b) throws IOException { - if (!b.failedVolume) { - StorageLocation location = StorageLocation.parse(b.volumeRootStr); - hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR); - this.state = VolumeState.NOT_INITIALIZED; - this.clusterID = b.clusterID; - this.datanodeUuid = b.datanodeUuid; - this.volumeIOStats = new VolumeIOStats(); - - VolumeInfo.Builder volumeBuilder = - new VolumeInfo.Builder(b.volumeRootStr, b.conf) - .storageType(b.storageType) - .configuredCapacity(b.configuredCapacity); - this.volumeInfo = volumeBuilder.build(); - this.committedBytes = new AtomicLong(0); - - LOG.info("Creating Volume: " + this.hddsRootDir + " of storage type : " + - b.storageType + " and capacity : " + volumeInfo.getCapacity()); - - initialize(); - } else { - // Builder is called with failedVolume set, so create a failed volume - // HddsVolumeObject. - hddsRootDir = new File(b.volumeRootStr); - volumeIOStats = null; - volumeInfo = null; - storageID = UUID.randomUUID().toString(); - state = VolumeState.FAILED; - committedBytes = null; - } - } - - public VolumeInfo getVolumeInfo() { - return volumeInfo; - } - - /** - * Initializes the volume. - * Creates the Version file if not present, - * otherwise returns with IOException. - * @throws IOException - */ - private void initialize() throws IOException { - VolumeState intialVolumeState = analyzeVolumeState(); - switch (intialVolumeState) { - case NON_EXISTENT: - // Root directory does not exist. Create it. - if (!hddsRootDir.mkdirs()) { - throw new IOException("Cannot create directory " + hddsRootDir); - } - setState(VolumeState.NOT_FORMATTED); - createVersionFile(); - break; - case NOT_FORMATTED: - // Version File does not exist. Create it. - createVersionFile(); - break; - case NOT_INITIALIZED: - // Version File exists. Verify its correctness and update property fields. - readVersionFile(); - setState(VolumeState.NORMAL); - break; - case INCONSISTENT: - // Volume Root is in an inconsistent state. Skip loading this volume. - throw new IOException("Volume is in an " + VolumeState.INCONSISTENT + - " state. Skipped loading volume: " + hddsRootDir.getPath()); - default: - throw new IOException("Unrecognized initial state : " + - intialVolumeState + "of volume : " + hddsRootDir); - } - } - - private VolumeState analyzeVolumeState() { - if (!hddsRootDir.exists()) { - // Volume Root does not exist. - return VolumeState.NON_EXISTENT; - } - if (!hddsRootDir.isDirectory()) { - // Volume Root exists but is not a directory. - return VolumeState.INCONSISTENT; - } - File[] files = hddsRootDir.listFiles(); - if (files == null || files.length == 0) { - // Volume Root exists and is empty. - return VolumeState.NOT_FORMATTED; - } - if (!getVersionFile().exists()) { - // Volume Root is non empty but VERSION file does not exist. - return VolumeState.INCONSISTENT; - } - // Volume Root and VERSION file exist. - return VolumeState.NOT_INITIALIZED; - } - - public void format(String cid) throws IOException { - Preconditions.checkNotNull(cid, "clusterID cannot be null while " + - "formatting Volume"); - this.clusterID = cid; - initialize(); - } - - /** - * Create Version File and write property fields into it. 
- * @throws IOException - */ - private void createVersionFile() throws IOException { - this.storageID = HddsVolumeUtil.generateUuid(); - this.cTime = Time.now(); - this.layoutVersion = ChunkLayOutVersion.getLatestVersion().getVersion(); - - if (this.clusterID == null || datanodeUuid == null) { - // HddsDatanodeService does not have the cluster information yet. Wait - // for registration with SCM. - LOG.debug("ClusterID not available. Cannot format the volume {}", - this.hddsRootDir.getPath()); - setState(VolumeState.NOT_FORMATTED); - } else { - // Write the version file to disk. - writeVersionFile(); - setState(VolumeState.NORMAL); - } - } - - private void writeVersionFile() throws IOException { - Preconditions.checkNotNull(this.storageID, - "StorageID cannot be null in Version File"); - Preconditions.checkNotNull(this.clusterID, - "ClusterID cannot be null in Version File"); - Preconditions.checkNotNull(this.datanodeUuid, - "DatanodeUUID cannot be null in Version File"); - Preconditions.checkArgument(this.cTime > 0, - "Creation Time should be positive"); - Preconditions.checkArgument(this.layoutVersion == - DataNodeLayoutVersion.getLatestVersion().getVersion(), - "Version File should have the latest LayOutVersion"); - - File versionFile = getVersionFile(); - LOG.debug("Writing Version file to disk, {}", versionFile); - - DatanodeVersionFile dnVersionFile = new DatanodeVersionFile(this.storageID, - this.clusterID, this.datanodeUuid, this.cTime, this.layoutVersion); - dnVersionFile.createVersionFile(versionFile); - } - - /** - * Read Version File and update property fields. - * Get common storage fields. - * Should be overloaded if additional fields need to be read. - * - * @throws IOException on error - */ - private void readVersionFile() throws IOException { - File versionFile = getVersionFile(); - Properties props = DatanodeVersionFile.readFrom(versionFile); - if (props.isEmpty()) { - throw new InconsistentStorageStateException( - "Version file " + versionFile + " is missing"); - } - - LOG.debug("Reading Version file from disk, {}", versionFile); - this.storageID = HddsVolumeUtil.getStorageID(props, versionFile); - this.clusterID = HddsVolumeUtil.getClusterID(props, versionFile, - this.clusterID); - this.datanodeUuid = HddsVolumeUtil.getDatanodeUUID(props, versionFile, - this.datanodeUuid); - this.cTime = HddsVolumeUtil.getCreationTime(props, versionFile); - this.layoutVersion = HddsVolumeUtil.getLayOutVersion(props, versionFile); - } - - private File getVersionFile() { - return HddsVolumeUtil.getVersionFile(hddsRootDir); - } - - public File getHddsRootDir() { - return hddsRootDir; - } - - public StorageType getStorageType() { - if(volumeInfo != null) { - return volumeInfo.getStorageType(); - } - return StorageType.DEFAULT; - } - - public String getStorageID() { - return storageID; - } - - public String getClusterID() { - return clusterID; - } - - public String getDatanodeUuid() { - return datanodeUuid; - } - - public long getCTime() { - return cTime; - } - - public int getLayoutVersion() { - return layoutVersion; - } - - public VolumeState getStorageState() { - return state; - } - - public long getCapacity() throws IOException { - if(volumeInfo != null) { - return volumeInfo.getCapacity(); - } - return 0; - } - - public long getAvailable() throws IOException { - if(volumeInfo != null) { - return volumeInfo.getAvailable(); - } - return 0; - } - - public void setState(VolumeState state) { - this.state = state; - } - - public boolean isFailed() { - return (state == VolumeState.FAILED); - } 
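The VERSION metadata written and read above is loaded back as a java.util.Properties object, so it can be inspected directly. A hedged sketch follows; the property key names are assumptions for illustration (the authoritative names live in DatanodeVersionFile and HddsVolumeUtil), and the path is a placeholder.

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.Properties;

    public class VersionFilePeek {
      public static void main(String[] args) throws IOException {
        Properties props = new Properties();
        try (FileInputStream in = new FileInputStream("/data/disk1/hdds/VERSION")) {
          props.load(in);
        }
        // Key names below are assumed for illustration; see DatanodeVersionFile.
        System.out.println("storageID     = " + props.getProperty("storageID"));
        System.out.println("clusterID     = " + props.getProperty("clusterID"));
        System.out.println("datanodeUuid  = " + props.getProperty("datanodeUuid"));
        System.out.println("layOutVersion = " + props.getProperty("layOutVersion"));
      }
    }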
- - public VolumeIOStats getVolumeIOStats() { - return volumeIOStats; - } - - public void failVolume() { - setState(VolumeState.FAILED); - if (volumeInfo != null) { - volumeInfo.shutdownUsageThread(); - } - } - - public void shutdown() { - this.state = VolumeState.NON_EXISTENT; - if (volumeInfo != null) { - volumeInfo.shutdownUsageThread(); - } - } - - /** - * VolumeState represents the different states a HddsVolume can be in. - * NORMAL => Volume can be used for storage - * FAILED => Volume has failed due and can no longer be used for - * storing containers. - * NON_EXISTENT => Volume Root dir does not exist - * INCONSISTENT => Volume Root dir is not empty but VERSION file is - * missing or Volume Root dir is not a directory - * NOT_FORMATTED => Volume Root exists but not formatted(no VERSION file) - * NOT_INITIALIZED => VERSION file exists but has not been verified for - * correctness. - */ - public enum VolumeState { - NORMAL, - FAILED, - NON_EXISTENT, - INCONSISTENT, - NOT_FORMATTED, - NOT_INITIALIZED - } - - /** - * add "delta" bytes to committed space in the volume. - * @param delta bytes to add to committed space counter - * @return bytes of committed space - */ - public long incCommittedBytes(long delta) { - return committedBytes.addAndGet(delta); - } - - /** - * return the committed space in the volume. - * @return bytes of committed space - */ - public long getCommittedBytes() { - return committedBytes.get(); - } - - /** - * Only for testing. Do not use otherwise. - */ - @VisibleForTesting - public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) { - if (volumeInfo != null) { - volumeInfo.setScmUsageForTesting(scmUsageForTest); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java deleted file mode 100644 index 800789f6e0e73..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java +++ /dev/null @@ -1,424 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Sets; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; -import org.apache.hadoop.util.DiskChecker.DiskErrorException; -import org.apache.hadoop.util.Timer; - -import static org.apache.hadoop.hdfs.server.datanode.DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.Optional; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY; - - -/** - * A class that encapsulates running disk checks against each HDDS volume and - * allows retrieving a list of failed volumes. - */ -public class HddsVolumeChecker { - - public static final Logger LOG = - LoggerFactory.getLogger(HddsVolumeChecker.class); - - private AsyncChecker delegateChecker; - - private final AtomicLong numVolumeChecks = new AtomicLong(0); - private final AtomicLong numAllVolumeChecks = new AtomicLong(0); - private final AtomicLong numSkippedChecks = new AtomicLong(0); - - /** - * Max allowed time for a disk check in milliseconds. If the check - * doesn't complete within this time we declare the disk as dead. - */ - private final long maxAllowedTimeForCheckMs; - - /** - * Minimum time between two successive disk checks of a volume. - */ - private final long minDiskCheckGapMs; - - /** - * Timestamp of the last check of all volumes. - */ - private long lastAllVolumesCheck; - - private final Timer timer; - - private final ExecutorService checkVolumeResultHandlerExecutorService; - - /** - * @param conf Configuration object. - * @param timer {@link Timer} object used for throttling checks. 
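The constructor that follows reads its limits from the standard HDFS datanode keys referenced above. A minimal construction sketch, assuming defaults are acceptable apart from the two durations set explicitly; the values are examples, not recommendations.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolumeChecker;
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;
    import org.apache.hadoop.util.Timer;

    public class VolumeCheckerConfigSketch {
      public static void main(String[] args) throws DiskErrorException {
        Configuration conf = new Configuration();
        // Declare a disk dead if its check does not finish within 10 minutes.
        conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
            10, TimeUnit.MINUTES);
        // Never re-check the same volume more often than every 15 minutes.
        conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
            15, TimeUnit.MINUTES);
        HddsVolumeChecker checker = new HddsVolumeChecker(conf, new Timer());
        System.out.println("Checker ready: " + checker);
      }
    }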
- */ - public HddsVolumeChecker(Configuration conf, Timer timer) - throws DiskErrorException { - maxAllowedTimeForCheckMs = conf.getTimeDuration( - DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, - DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - - if (maxAllowedTimeForCheckMs <= 0) { - throw new DiskErrorException("Invalid value configured for " - + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - " - + maxAllowedTimeForCheckMs + " (should be > 0)"); - } - - this.timer = timer; - - /** - * Maximum number of volume failures that can be tolerated without - * declaring a fatal error. - */ - int maxVolumeFailuresTolerated = conf.getInt( - DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, - DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT); - - minDiskCheckGapMs = conf.getTimeDuration( - DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY, - DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_DEFAULT, - TimeUnit.MILLISECONDS); - - if (minDiskCheckGapMs < 0) { - throw new DiskErrorException("Invalid value configured for " - + DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY + " - " - + minDiskCheckGapMs + " (should be >= 0)"); - } - - long diskCheckTimeout = conf.getTimeDuration( - DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, - DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - - if (diskCheckTimeout < 0) { - throw new DiskErrorException("Invalid value configured for " - + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - " - + diskCheckTimeout + " (should be >= 0)"); - } - - lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs; - - if (maxVolumeFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT) { - throw new DiskErrorException("Invalid value configured for " - + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - " - + maxVolumeFailuresTolerated + " " - + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG); - } - - delegateChecker = new ThrottledAsyncChecker<>( - timer, minDiskCheckGapMs, diskCheckTimeout, - Executors.newCachedThreadPool( - new ThreadFactoryBuilder() - .setNameFormat("DataNode DiskChecker thread %d") - .setDaemon(true) - .build())); - - checkVolumeResultHandlerExecutorService = Executors.newCachedThreadPool( - new ThreadFactoryBuilder() - .setNameFormat("VolumeCheck ResultHandler thread %d") - .setDaemon(true) - .build()); - } - - /** - * Run checks against all HDDS volumes. - * - * This check may be performed at service startup and subsequently at - * regular intervals to detect and handle failed volumes. - * - * @param volumes - Set of volumes to be checked. This set must be immutable - * for the duration of the check else the results will be - * unexpected. - * - * @return set of failed volumes. 
- */ - public Set checkAllVolumes(Collection volumes) - throws InterruptedException { - final long gap = timer.monotonicNow() - lastAllVolumesCheck; - if (gap < minDiskCheckGapMs) { - numSkippedChecks.incrementAndGet(); - if (LOG.isTraceEnabled()) { - LOG.trace( - "Skipped checking all volumes, time since last check {} is less " + - "than the minimum gap between checks ({} ms).", - gap, minDiskCheckGapMs); - } - return Collections.emptySet(); - } - - lastAllVolumesCheck = timer.monotonicNow(); - final Set healthyVolumes = new HashSet<>(); - final Set failedVolumes = new HashSet<>(); - final Set allVolumes = new HashSet<>(); - - final AtomicLong numVolumes = new AtomicLong(volumes.size()); - final CountDownLatch latch = new CountDownLatch(1); - - for (HddsVolume v : volumes) { - Optional> olf = - delegateChecker.schedule(v, null); - LOG.info("Scheduled health check for volume {}", v); - if (olf.isPresent()) { - allVolumes.add(v); - Futures.addCallback(olf.get(), - new ResultHandler(v, healthyVolumes, failedVolumes, - numVolumes, (ignored1, ignored2) -> latch.countDown())); - } else { - if (numVolumes.decrementAndGet() == 0) { - latch.countDown(); - } - } - } - - // Wait until our timeout elapses, after which we give up on - // the remaining volumes. - if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) { - LOG.warn("checkAllVolumes timed out after {} ms" + - maxAllowedTimeForCheckMs); - } - - numAllVolumeChecks.incrementAndGet(); - synchronized (this) { - // All volumes that have not been detected as healthy should be - // considered failed. This is a superset of 'failedVolumes'. - // - // Make a copy under the mutex as Sets.difference() returns a view - // of a potentially changing set. - return new HashSet<>(Sets.difference(allVolumes, healthyVolumes)); - } - } - - /** - * A callback interface that is supplied the result of running an - * async disk check on multiple volumes. - */ - public interface Callback { - /** - * @param healthyVolumes set of volumes that passed disk checks. - * @param failedVolumes set of volumes that failed disk checks. - */ - void call(Set healthyVolumes, - Set failedVolumes); - } - - /** - * Check a single volume asynchronously, returning a {@link ListenableFuture} - * that can be used to retrieve the final result. - * - * If the volume cannot be referenced then it is already closed and - * cannot be checked. No error is propagated to the callback. - * - * @param volume the volume that is to be checked. - * @param callback callback to be invoked when the volume check completes. - * @return true if the check was scheduled and the callback will be invoked. - * false otherwise. - */ - public boolean checkVolume(final HddsVolume volume, Callback callback) { - if (volume == null) { - LOG.debug("Cannot schedule check on null volume"); - return false; - } - - Optional> olf = - delegateChecker.schedule(volume, null); - if (olf.isPresent()) { - numVolumeChecks.incrementAndGet(); - Futures.addCallback(olf.get(), - new ResultHandler(volume, new HashSet<>(), new HashSet<>(), - new AtomicLong(1), callback), - checkVolumeResultHandlerExecutorService - ); - return true; - } - return false; - } - - /** - * A callback to process the results of checking a volume. 
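A hedged sketch of driving checkVolume() with the Callback described above; checker and volume stand in for instances of these now-removed classes, and the reaction in the callback is only one plausible policy.

    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolumeChecker;

    public class SingleVolumeCheckSketch {
      // Sketch only: schedule one async check and react when its result arrives.
      static void checkOne(HddsVolumeChecker checker, HddsVolume volume) {
        boolean scheduled = checker.checkVolume(volume, (healthy, failed) -> {
          if (failed.contains(volume)) {
            volume.failVolume();   // keep new containers off the bad disk
          }
        });
        if (!scheduled) {
          // Null volume, or a check for this volume is already in flight / throttled.
        }
      }
    }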
- */ - private class ResultHandler - implements FutureCallback { - private final HddsVolume volume; - private final Set failedVolumes; - private final Set healthyVolumes; - private final AtomicLong volumeCounter; - - @Nullable - private final Callback callback; - - /** - * - * @param healthyVolumes set of healthy volumes. If the disk check is - * successful, add the volume here. - * @param failedVolumes set of failed volumes. If the disk check fails, - * add the volume here. - * @param volumeCounter volumeCounter used to trigger callback invocation. - * @param callback invoked when the volumeCounter reaches 0. - */ - ResultHandler(HddsVolume volume, - Set healthyVolumes, - Set failedVolumes, - AtomicLong volumeCounter, - @Nullable Callback callback) { - this.volume = volume; - this.healthyVolumes = healthyVolumes; - this.failedVolumes = failedVolumes; - this.volumeCounter = volumeCounter; - this.callback = callback; - } - - @Override - public void onSuccess(@Nonnull VolumeCheckResult result) { - switch (result) { - case HEALTHY: - case DEGRADED: - if (LOG.isDebugEnabled()) { - LOG.debug("Volume {} is {}.", volume, result); - } - markHealthy(); - break; - case FAILED: - LOG.warn("Volume {} detected as being unhealthy", volume); - markFailed(); - break; - default: - LOG.error("Unexpected health check result {} for volume {}", - result, volume); - markHealthy(); - break; - } - cleanup(); - } - - @Override - public void onFailure(@Nonnull Throwable t) { - Throwable exception = (t instanceof ExecutionException) ? - t.getCause() : t; - LOG.warn("Exception running disk checks against volume " + - volume, exception); - markFailed(); - cleanup(); - } - - private void markHealthy() { - synchronized (HddsVolumeChecker.this) { - healthyVolumes.add(volume); - } - } - - private void markFailed() { - synchronized (HddsVolumeChecker.this) { - failedVolumes.add(volume); - } - } - - private void cleanup() { - invokeCallback(); - } - - private void invokeCallback() { - try { - final long remaining = volumeCounter.decrementAndGet(); - if (callback != null && remaining == 0) { - callback.call(healthyVolumes, failedVolumes); - } - } catch(Exception e) { - // Propagating this exception is unlikely to be helpful. - LOG.warn("Unexpected exception", e); - } - } - } - - /** - * Shutdown the checker and its associated ExecutorService. - * - * See {@link ExecutorService#awaitTermination} for the interpretation - * of the parameters. - */ - void shutdownAndWait(int gracePeriod, TimeUnit timeUnit) { - try { - delegateChecker.shutdownAndWait(gracePeriod, timeUnit); - } catch (InterruptedException e) { - LOG.warn("{} interrupted during shutdown.", - this.getClass().getSimpleName()); - Thread.currentThread().interrupt(); - } - } - - /** - * This method is for testing only. - * - * @param testDelegate - */ - @VisibleForTesting - void setDelegateChecker( - AsyncChecker testDelegate) { - delegateChecker = testDelegate; - } - - /** - * Return the number of {@link #checkVolume} invocations. - */ - public long getNumVolumeChecks() { - return numVolumeChecks.get(); - } - - /** - * Return the number of {@link #checkAllVolumes} invocations. - */ - public long getNumAllVolumeChecks() { - return numAllVolumeChecks.get(); - } - - /** - * Return the number of checks skipped because the minimum gap since the - * last check had not elapsed. 
- */ - public long getNumSkippedChecks() { - return numSkippedChecks.get(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java deleted file mode 100644 index f503149aca438..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Choose volumes in round-robin order. - * The caller should synchronize access to the list of volumes. - */ -public class RoundRobinVolumeChoosingPolicy implements VolumeChoosingPolicy { - - public static final Log LOG = LogFactory.getLog( - RoundRobinVolumeChoosingPolicy.class); - - // Stores the index of the next volume to be returned. - private AtomicInteger nextVolumeIndex = new AtomicInteger(0); - - @Override - public HddsVolume chooseVolume(List volumes, - long maxContainerSize) throws IOException { - - // No volumes available to choose from - if (volumes.size() < 1) { - throw new DiskOutOfSpaceException("No more available volumes"); - } - - // since volumes could've been removed because of the failure - // make sure we are not out of bounds - int nextIndex = nextVolumeIndex.get(); - int currentVolumeIndex = nextIndex < volumes.size() ? 
nextIndex : 0; - - int startVolumeIndex = currentVolumeIndex; - long maxAvailable = 0; - - while (true) { - final HddsVolume volume = volumes.get(currentVolumeIndex); - // adjust for remaining capacity in Open containers - long availableVolumeSize = volume.getAvailable() - - volume.getCommittedBytes(); - - currentVolumeIndex = (currentVolumeIndex + 1) % volumes.size(); - - if (availableVolumeSize > maxContainerSize) { - nextVolumeIndex.compareAndSet(nextIndex, currentVolumeIndex); - return volume; - } - - if (availableVolumeSize > maxAvailable) { - maxAvailable = availableVolumeSize; - } - - if (currentVolumeIndex == startVolumeIndex) { - throw new DiskOutOfSpaceException("Out of space: " - + "The volume with the most available space (=" + maxAvailable - + " B) is less than the container size (=" + maxContainerSize - + " B)."); - } - - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java deleted file mode 100644 index 836fdf3e39518..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java +++ /dev/null @@ -1,248 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
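Stepping back to the RoundRobinVolumeChoosingPolicy.chooseVolume() loop removed just above: the sketch below restates the same idea (round-robin rotation plus a free-space check) in a simplified, self-contained form. It is not the removed implementation; the Volume interface is a stand-in for HddsVolume.

    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;

    /** Simplified sketch of round-robin volume selection with a free-space check. */
    public class RoundRobinSketch {
      public interface Volume {
        long availableBytes();
      }

      private final AtomicInteger next = new AtomicInteger(0);

      public Volume choose(List<Volume> volumes, long required) {
        if (volumes.isEmpty()) {
          throw new IllegalStateException("No volumes available");
        }
        int start = next.get() % volumes.size();   // volumes may have been removed; stay in bounds
        long maxSeen = 0;
        for (int i = 0; i < volumes.size(); i++) {
          int idx = (start + i) % volumes.size();
          Volume candidate = volumes.get(idx);
          long free = candidate.availableBytes();
          if (free > required) {
            next.set((idx + 1) % volumes.size());  // resume after the chosen volume next time
            return candidate;
          }
          maxSeen = Math.max(maxSeen, free);
        }
        throw new IllegalStateException("Out of space: best volume has only "
            + maxSeen + " bytes free, need more than " + required);
      }
    }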

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; -import org.apache.hadoop.util.Timer; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.WeakHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -/** - * An implementation of {@link AsyncChecker} that skips checking recently - * checked objects. It will enforce at least minMsBetweenChecks - * milliseconds between two successive checks of any one object. - * - * It is assumed that the total number of Checkable objects in the system - * is small, (not more than a few dozen) since the checker uses O(Checkables) - * storage and also potentially O(Checkables) threads. - * - * minMsBetweenChecks should be configured reasonably - * by the caller to avoid spinning up too many threads frequently. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public class ThrottledAsyncChecker implements AsyncChecker { - public static final Logger LOG = - LoggerFactory.getLogger(ThrottledAsyncChecker.class); - - private final Timer timer; - - /** - * The ExecutorService used to schedule asynchronous checks. - */ - private final ListeningExecutorService executorService; - private final ScheduledExecutorService scheduledExecutorService; - - /** - * The minimum gap in milliseconds between two successive checks - * of the same object. This is the throttle. - */ - private final long minMsBetweenChecks; - private final long diskCheckTimeout; - - /** - * Map of checks that are currently in progress. Protected by the object - * lock. - */ - private final Map> checksInProgress; - - /** - * Maps Checkable objects to a future that can be used to retrieve - * the results of the operation. - * Protected by the object lock. 
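To make the throttling contract above concrete: two back-to-back schedule() calls for the same Checkable inside minMsBetweenChecks produce only one real check. A hedged sketch follows; the Boolean/VolumeCheckResult type parameters are assumed to mirror how HddsVolumeChecker wires this class up, and checker/volume are placeholder instances.

    import java.util.Optional;
    import com.google.common.util.concurrent.ListenableFuture;
    import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
    import org.apache.hadoop.ozone.container.common.volume.ThrottledAsyncChecker;

    public class ThrottleSketch {
      static void demo(ThrottledAsyncChecker<Boolean, VolumeCheckResult> checker,
                       HddsVolume volume) {
        Optional<ListenableFuture<VolumeCheckResult>> first = checker.schedule(volume, null);
        Optional<ListenableFuture<VolumeCheckResult>> second = checker.schedule(volume, null);
        System.out.println("first scheduled:  " + first.isPresent());   // expected: true
        System.out.println("second scheduled: " + second.isPresent());  // expected: false (in flight or inside the min gap)
      }
    }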
- */ - private final Map> - completedChecks; - - public ThrottledAsyncChecker(final Timer timer, - final long minMsBetweenChecks, - final long diskCheckTimeout, - final ExecutorService executorService) { - this.timer = timer; - this.minMsBetweenChecks = minMsBetweenChecks; - this.diskCheckTimeout = diskCheckTimeout; - this.executorService = MoreExecutors.listeningDecorator(executorService); - this.checksInProgress = new HashMap<>(); - this.completedChecks = new WeakHashMap<>(); - - if (this.diskCheckTimeout > 0) { - ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new - ScheduledThreadPoolExecutor(1); - this.scheduledExecutorService = MoreExecutors - .getExitingScheduledExecutorService(scheduledThreadPoolExecutor); - } else { - this.scheduledExecutorService = null; - } - } - - /** - * See {@link AsyncChecker#schedule} - * - * If the object has been checked recently then the check will - * be skipped. Multiple concurrent checks for the same object - * will receive the same Future. - */ - @Override - public Optional> schedule( - Checkable target, K context) { - if (checksInProgress.containsKey(target)) { - return Optional.empty(); - } - - if (completedChecks.containsKey(target)) { - final ThrottledAsyncChecker.LastCheckResult result = - completedChecks.get(target); - final long msSinceLastCheck = timer.monotonicNow() - result.completedAt; - if (msSinceLastCheck < minMsBetweenChecks) { - if (LOG.isDebugEnabled()) { - LOG.debug("Skipped checking {}. Time since last check {}ms " + - "is less than the min gap {}ms.", - target, msSinceLastCheck, minMsBetweenChecks); - } - return Optional.empty(); - } - } - - LOG.info("Scheduling a check for {}", target); - final ListenableFuture lfWithoutTimeout = executorService.submit( - () -> target.check(context)); - final ListenableFuture lf; - - if (diskCheckTimeout > 0) { - lf = TimeoutFuture - .create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS, - scheduledExecutorService); - } else { - lf = lfWithoutTimeout; - } - - checksInProgress.put(target, lf); - addResultCachingCallback(target, lf); - return Optional.of(lf); - } - - /** - * Register a callback to cache the result of a check. - * @param target - * @param lf - */ - private void addResultCachingCallback( - Checkable target, ListenableFuture lf) { - Futures.addCallback(lf, new FutureCallback() { - @Override - public void onSuccess(@Nullable V result) { - synchronized (ThrottledAsyncChecker.this) { - checksInProgress.remove(target); - completedChecks.put(target, new LastCheckResult<>( - result, timer.monotonicNow())); - } - } - - @Override - public void onFailure(@Nonnull Throwable t) { - synchronized (ThrottledAsyncChecker.this) { - checksInProgress.remove(target); - completedChecks.put(target, new LastCheckResult<>( - t, timer.monotonicNow())); - } - } - }); - } - - /** - * {@inheritDoc}. - * - * The results of in-progress checks are not useful during shutdown, - * so we optimize for faster shutdown by interrupt all actively - * executing checks. - */ - @Override - public void shutdownAndWait(long timeout, TimeUnit timeUnit) - throws InterruptedException { - if (scheduledExecutorService != null) { - scheduledExecutorService.shutdownNow(); - scheduledExecutorService.awaitTermination(timeout, timeUnit); - } - - executorService.shutdownNow(); - executorService.awaitTermination(timeout, timeUnit); - } - - /** - * Status of running a check. It can either be a result or an - * exception, depending on whether the check completed or threw. 
- */ - private static final class LastCheckResult { - /** - * Timestamp at which the check completed. - */ - private final long completedAt; - - /** - * Result of running the check if it completed. null if it threw. - */ - @Nullable - private final V result; - - /** - * Exception thrown by the check. null if it returned a result. - */ - private final Throwable exception; // null on success. - - /** - * Initialize with a result. - * @param result - */ - private LastCheckResult(V result, long completedAt) { - this.result = result; - this.exception = null; - this.completedAt = completedAt; - } - - /** - * Initialize with an exception. - * @param completedAt - * @param t - */ - private LastCheckResult(Throwable t, long completedAt) { - this.result = null; - this.exception = t; - this.completedAt = completedAt; - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java deleted file mode 100644 index 626814e96c119..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * Some portions of this class have been modified to make it functional in this - * package. - */ -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListenableFuture; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.annotation.Nullable; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Implementation of {@code Futures#withTimeout}. - *
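Because the class below is a vendored copy of Guava's timeout decorator, its observable behavior can be sketched with Guava's public Futures.withTimeout, which I assume as the present-day equivalent; the slow task and one-second limit are illustrative only.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;

    public class TimeoutSketch {
      public static void main(String[] args) {
        ListeningExecutorService pool =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();

        // A deliberately slow "disk check".
        ListenableFuture<String> slow = pool.submit(() -> {
          Thread.sleep(5_000);
          return "HEALTHY";
        });

        // Assumed equivalent of TimeoutFuture.create(): complete exceptionally with a
        // TimeoutException (wrapped in ExecutionException) after one second and cancel the delegate.
        ListenableFuture<String> bounded =
            Futures.withTimeout(slow, 1, TimeUnit.SECONDS, timer);

        try {
          System.out.println("Result: " + bounded.get());
        } catch (Exception e) {
          System.out.println("Check timed out or failed: " + e.getCause());
        } finally {
          pool.shutdownNow();
          timer.shutdownNow();
        }
      }
    }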

- *
Future that delegates to another but will finish early (via a - * {@link TimeoutException} wrapped in an {@link ExecutionException}) if the - * specified duration expires. The delegate future is interrupted and - * cancelled if it times out. - */ -final class TimeoutFuture extends AbstractFuture.TrustedFuture { - public static final Logger LOG = LoggerFactory.getLogger( - TimeoutFuture.class); - - static ListenableFuture create( - ListenableFuture delegate, - long time, - TimeUnit unit, - ScheduledExecutorService scheduledExecutor) { - TimeoutFuture result = new TimeoutFuture(delegate); - TimeoutFuture.Fire fire = new TimeoutFuture.Fire(result); - result.timer = scheduledExecutor.schedule(fire, time, unit); - delegate.addListener(fire, directExecutor()); - return result; - } - - /* - * Memory visibility of these fields. There are two cases to consider. - * - * 1. visibility of the writes to these fields to Fire.run: - * - * The initial write to delegateRef is made definitely visible via the - * semantics of addListener/SES.schedule. The later racy write in cancel() - * is not guaranteed to be observed, however that is fine since the - * correctness is based on the atomic state in our base class. The initial - * write to timer is never definitely visible to Fire.run since it is - * assigned after SES.schedule is called. Therefore Fire.run has to check - * for null. However, it should be visible if Fire.run is called by - * delegate.addListener since addListener is called after the assignment - * to timer, and importantly this is the main situation in which we need to - * be able to see the write. - * - * 2. visibility of the writes to an afterDone() call triggered by cancel(): - * - * Since these fields are non-final that means that TimeoutFuture is not - * being 'safely published', thus a motivated caller may be able to expose - * the reference to another thread that would then call cancel() and be - * unable to cancel the delegate. There are a number of ways to solve this, - * none of which are very pretty, and it is currently believed to be a - * purely theoretical problem (since the other actions should supply - * sufficient write-barriers). - */ - - @Nullable private ListenableFuture delegateRef; - @Nullable private Future timer; - - private TimeoutFuture(ListenableFuture delegate) { - this.delegateRef = Preconditions.checkNotNull(delegate); - } - - /** - * A runnable that is called when the delegate or the timer completes. - */ - private static final class Fire implements Runnable { - @Nullable - private TimeoutFuture timeoutFutureRef; - - Fire( - TimeoutFuture timeoutFuture) { - this.timeoutFutureRef = timeoutFuture; - } - - @Override - public void run() { - // If either of these reads return null then we must be after a - // successful cancel or another call to this method. - TimeoutFuture timeoutFuture = timeoutFutureRef; - if (timeoutFuture == null) { - return; - } - ListenableFuture delegate = timeoutFuture.delegateRef; - if (delegate == null) { - return; - } - - /* - * If we're about to complete the TimeoutFuture, we want to release our - * reference to it. Otherwise, we'll pin it (and its result) in memory - * until the timeout task is GCed. (The need to clear our reference to - * the TimeoutFuture is the reason we use a *static* nested class with - * a manual reference back to the "containing" class.) - * - * This has the nice-ish side effect of limiting reentrancy: run() calls - * timeoutFuture.setException() calls run(). 
That reentrancy would - * already be harmless, since timeoutFuture can be set (and delegate - * cancelled) only once. (And "set only once" is important for other - * reasons: run() can still be invoked concurrently in different threads, - * even with the above null checks.) - */ - timeoutFutureRef = null; - if (delegate.isDone()) { - timeoutFuture.setFuture(delegate); - } else { - try { - timeoutFuture.setException( - new TimeoutException("Future timed out: " + delegate)); - } finally { - delegate.cancel(true); - } - } - } - } - - @Override - protected void afterDone() { - maybePropagateCancellation(delegateRef); - - Future localTimer = timer; - // Try to cancel the timer as an optimization. - // timer may be null if this call to run was by the timer task since there - // is no happens-before edge between the assignment to timer and an - // execution of the timer task. - if (localTimer != null) { - localTimer.cancel(false); - } - - delegateRef = null; - timer = null; - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java deleted file mode 100644 index 9e2eb221e81f3..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import java.util.concurrent.atomic.AtomicLong; - -/** - * This class is used to track Volume IO stats for each HDDS Volume. - */ -public class VolumeIOStats { - - private final AtomicLong readBytes; - private final AtomicLong readOpCount; - private final AtomicLong writeBytes; - private final AtomicLong writeOpCount; - private final AtomicLong readTime; - private final AtomicLong writeTime; - - public VolumeIOStats() { - readBytes = new AtomicLong(0); - readOpCount = new AtomicLong(0); - writeBytes = new AtomicLong(0); - writeOpCount = new AtomicLong(0); - readTime = new AtomicLong(0); - writeTime = new AtomicLong(0); - } - - /** - * Increment number of bytes read from the volume. - * @param bytesRead - */ - public void incReadBytes(long bytesRead) { - readBytes.addAndGet(bytesRead); - } - - /** - * Increment the read operations performed on the volume. - */ - public void incReadOpCount() { - readOpCount.incrementAndGet(); - } - - /** - * Increment number of bytes written on to the volume. - * @param bytesWritten - */ - public void incWriteBytes(long bytesWritten) { - writeBytes.addAndGet(bytesWritten); - } - - /** - * Increment the write operations performed on the volume. 
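The counters in this class are typically bumped around each chunk read or write. A hedged sketch of the write-side pattern; writeChunk() and its byte count are hypothetical stand-ins, while Time.monotonicNow() is the Hadoop clock used elsewhere in this code base.

    import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
    import org.apache.hadoop.util.Time;

    public class IoStatsSketch {
      // Sketch only: account for one write against the volume's stats.
      static void recordWrite(VolumeIOStats stats, byte[] chunk) {
        long start = Time.monotonicNow();
        long written = writeChunk(chunk);           // hypothetical I/O call
        stats.incWriteBytes(written);
        stats.incWriteOpCount();
        stats.incWriteTime(Time.monotonicNow() - start);
      }

      private static long writeChunk(byte[] chunk) {
        return chunk.length;                        // stand-in for the real write
      }
    }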
- */ - public void incWriteOpCount() { - writeOpCount.incrementAndGet(); - } - - /** - * Increment the time taken by read operation on the volume. - * @param time - */ - public void incReadTime(long time) { - readTime.addAndGet(time); - } - - /** - * Increment the time taken by write operation on the volume. - * @param time - */ - public void incWriteTime(long time) { - writeTime.addAndGet(time); - } - - /** - * Returns total number of bytes read from the volume. - * @return long - */ - public long getReadBytes() { - return readBytes.get(); - } - - /** - * Returns total number of bytes written to the volume. - * @return long - */ - public long getWriteBytes() { - return writeBytes.get(); - } - - /** - * Returns total number of read operations performed on the volume. - * @return long - */ - public long getReadOpCount() { - return readOpCount.get(); - } - - /** - * Returns total number of write operations performed on the volume. - * @return long - */ - public long getWriteOpCount() { - return writeOpCount.get(); - } - - /** - * Returns total read operations time on the volume. - * @return long - */ - public long getReadTime() { - return readTime.get(); - } - - /** - * Returns total write operations time on the volume. - * @return long - */ - public long getWriteTime() { - return writeTime.get(); - } - - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java deleted file mode 100644 index 31f83ec8dabb1..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.fs.StorageType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; - -/** - * Stores information about a disk/volume. - */ -public final class VolumeInfo { - - private static final Logger LOG = LoggerFactory.getLogger(VolumeInfo.class); - - private final String rootDir; - private final StorageType storageType; - - // Space usage calculator - private final VolumeUsage usage; - - // Capacity configured. This is useful when we want to - // limit the visible capacity for tests. If negative, then we just - // query from the filesystem. - private long configuredCapacity; - - /** - * Builder for VolumeInfo. 
- */ - public static class Builder { - private final Configuration conf; - private final String rootDir; - private StorageType storageType; - private long configuredCapacity; - - public Builder(String root, Configuration config) { - this.rootDir = root; - this.conf = config; - } - - public Builder storageType(StorageType st) { - this.storageType = st; - return this; - } - - public Builder configuredCapacity(long capacity) { - this.configuredCapacity = capacity; - return this; - } - - public VolumeInfo build() throws IOException { - return new VolumeInfo(this); - } - } - - private VolumeInfo(Builder b) throws IOException { - - this.rootDir = b.rootDir; - File root = new File(this.rootDir); - - Boolean succeeded = root.isDirectory() || root.mkdirs(); - - if (!succeeded) { - LOG.error("Unable to create the volume root dir at : {}", root); - throw new IOException("Unable to create the volume root dir at " + root); - } - - this.storageType = (b.storageType != null ? - b.storageType : StorageType.DEFAULT); - - this.configuredCapacity = (b.configuredCapacity != 0 ? - b.configuredCapacity : -1); - - this.usage = new VolumeUsage(root, b.conf); - } - - public long getCapacity() throws IOException { - if (configuredCapacity < 0) { - return usage.getCapacity(); - } - return configuredCapacity; - } - - public long getAvailable() throws IOException { - return usage.getAvailable(); - } - - public long getScmUsed() throws IOException { - return usage.getScmUsed(); - } - - protected void shutdownUsageThread() { - usage.shutdown(); - } - - public String getRootDir() { - return this.rootDir; - } - - public StorageType getStorageType() { - return this.storageType; - } - - /** - * Only for testing. Do not use otherwise. - */ - @VisibleForTesting - public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) { - usage.setScmUsageForTesting(scmUsageForTest); - } - - /** - * Only for testing. Do not use otherwise. - */ - @VisibleForTesting - public VolumeUsage getUsageForTesting() { - return usage; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java deleted file mode 100644 index 875e96a0a96e5..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java +++ /dev/null @@ -1,519 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
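Before moving on to VolumeSet, a minimal sketch of building the VolumeInfo removed just above. The directory is a placeholder; leaving configuredCapacity unset means capacity is queried from the filesystem, per the field comment above.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.ozone.container.common.volume.VolumeInfo;

    public class VolumeInfoSketch {
      public static void main(String[] args) throws IOException {
        VolumeInfo info = new VolumeInfo.Builder("/data/disk1", new Configuration())
            .storageType(StorageType.DISK)
            .build();                 // capacity not configured, so it is queried from the filesystem
        System.out.println("capacity  = " + info.getCapacity());
        System.out.println("available = " + info.getAvailable());
      }
    }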
- */ - -package org.apache.hadoop.ozone.container.common.volume; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.EnumMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.ozone.common.InconsistentStorageStateException; -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState; -import org.apache.hadoop.util.DiskChecker; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.apache.hadoop.util.ShutdownHookManager; -import org.apache.hadoop.util.Timer; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * VolumeSet to manage HDDS volumes in a DataNode. - */ -public class VolumeSet { - - private static final Logger LOG = LoggerFactory.getLogger(VolumeSet.class); - - private Configuration conf; - - /** - * {@link VolumeSet#volumeMap} maintains a map of all active volumes in the - * DataNode. Each volume has one-to-one mapping with a volumeInfo object. - */ - private Map volumeMap; - /** - * {@link VolumeSet#failedVolumeMap} maintains a map of volumes which have - * failed. The keys in this map and {@link VolumeSet#volumeMap} are - * mutually exclusive. - */ - private Map failedVolumeMap; - - /** - * {@link VolumeSet#volumeStateMap} maintains a list of active volumes per - * StorageType. - */ - private EnumMap> volumeStateMap; - - /** - * An executor for periodic disk checks. - */ - private final ScheduledExecutorService diskCheckerservice; - private final ScheduledFuture periodicDiskChecker; - - private static final long DISK_CHECK_INTERVAL_MINUTES = 15; - - /** - * A Reentrant Read Write Lock to synchronize volume operations in VolumeSet. - * Any update to {@link VolumeSet#volumeMap}, - * {@link VolumeSet#failedVolumeMap}, or {@link VolumeSet#volumeStateMap} - * should be done after acquiring the write lock. 
- */ - private final ReentrantReadWriteLock volumeSetRWLock; - - private final String datanodeUuid; - private String clusterID; - - private Runnable shutdownHook; - private final HddsVolumeChecker volumeChecker; - - public VolumeSet(String dnUuid, Configuration conf) - throws IOException { - this(dnUuid, null, conf); - } - - public VolumeSet(String dnUuid, String clusterID, Configuration conf) - throws IOException { - this.datanodeUuid = dnUuid; - this.clusterID = clusterID; - this.conf = conf; - this.volumeSetRWLock = new ReentrantReadWriteLock(); - this.volumeChecker = getVolumeChecker(conf); - this.diskCheckerservice = Executors.newScheduledThreadPool( - 1, r -> new Thread(r, "Periodic HDDS volume checker")); - this.periodicDiskChecker = - diskCheckerservice.scheduleWithFixedDelay(() -> { - try { - checkAllVolumes(); - } catch (IOException e) { - LOG.warn("Exception while checking disks", e); - } - }, DISK_CHECK_INTERVAL_MINUTES, DISK_CHECK_INTERVAL_MINUTES, - TimeUnit.MINUTES); - initializeVolumeSet(); - } - - @VisibleForTesting - HddsVolumeChecker getVolumeChecker(Configuration configuration) - throws DiskChecker.DiskErrorException { - return new HddsVolumeChecker(configuration, new Timer()); - } - - /** - * Add DN volumes configured through ConfigKeys to volumeMap. - */ - private void initializeVolumeSet() throws IOException { - volumeMap = new ConcurrentHashMap<>(); - failedVolumeMap = new ConcurrentHashMap<>(); - volumeStateMap = new EnumMap<>(StorageType.class); - - Collection rawLocations = conf.getTrimmedStringCollection( - HDDS_DATANODE_DIR_KEY); - if (rawLocations.isEmpty()) { - rawLocations = conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY); - } - if (rawLocations.isEmpty()) { - throw new IllegalArgumentException("No location configured in either " - + HDDS_DATANODE_DIR_KEY + " or " + DFS_DATANODE_DATA_DIR_KEY); - } - - for (StorageType storageType : StorageType.values()) { - volumeStateMap.put(storageType, new ArrayList<>()); - } - - for (String locationString : rawLocations) { - try { - StorageLocation location = StorageLocation.parse(locationString); - - HddsVolume hddsVolume = createVolume(location.getUri().getPath(), - location.getStorageType()); - - checkAndSetClusterID(hddsVolume.getClusterID()); - - LOG.info("Added Volume : {} to VolumeSet", - hddsVolume.getHddsRootDir().getPath()); - - if (!hddsVolume.getHddsRootDir().mkdirs() && - !hddsVolume.getHddsRootDir().exists()) { - throw new IOException("Failed to create HDDS storage dir " + - hddsVolume.getHddsRootDir()); - } - volumeMap.put(hddsVolume.getHddsRootDir().getPath(), hddsVolume); - volumeStateMap.get(hddsVolume.getStorageType()).add(hddsVolume); - } catch (IOException e) { - HddsVolume volume = new HddsVolume.Builder(locationString) - .failedVolume(true).build(); - failedVolumeMap.put(locationString, volume); - LOG.error("Failed to parse the storage location: " + locationString, e); - } - } - - // First checking if we have any volumes, if all volumes are failed the - // volumeMap size will be zero, and we throw Exception. - if (volumeMap.size() == 0) { - throw new DiskOutOfSpaceException("No storage locations configured"); - } - - checkAllVolumes(); - - // Ensure volume threads are stopped and scm df is saved during shutdown. - shutdownHook = () -> { - saveVolumeSetUsed(); - }; - ShutdownHookManager.get().addShutdownHook(shutdownHook, - SHUTDOWN_HOOK_PRIORITY); - } - - /** - * Run a synchronous parallel check of all HDDS volumes, removing - * failed volumes. 
- */ - private void checkAllVolumes() throws IOException { - List allVolumes = getVolumesList(); - Set failedVolumes; - try { - failedVolumes = volumeChecker.checkAllVolumes(allVolumes); - } catch (InterruptedException e) { - throw new IOException("Interrupted while running disk check", e); - } - - if (failedVolumes.size() > 0) { - LOG.warn("checkAllVolumes got {} failed volumes - {}", - failedVolumes.size(), failedVolumes); - handleVolumeFailures(failedVolumes); - } else { - LOG.debug("checkAllVolumes encountered no failures"); - } - } - - /** - * Handle one or more failed volumes. - * @param failedVolumes - */ - private void handleVolumeFailures(Set failedVolumes) { - for (HddsVolume v: failedVolumes) { - this.writeLock(); - try { - // Immediately mark the volume as failed so it is unavailable - // for new containers. - volumeMap.remove(v.getHddsRootDir().getPath()); - failedVolumeMap.putIfAbsent(v.getHddsRootDir().getPath(), v); - } finally { - this.writeUnlock(); - } - - // TODO: - // 1. Mark all closed containers on the volume as unhealthy. - // 2. Consider stopping IO on open containers and tearing down - // active pipelines. - // 3. Handle Ratis log disk failure. - } - } - - /** - * If Version file exists and the {@link VolumeSet#clusterID} is not set yet, - * assign it the value from Version file. Otherwise, check that the given - * id matches with the id from version file. - * @param idFromVersionFile value of the property from Version file - * @throws InconsistentStorageStateException - */ - private void checkAndSetClusterID(String idFromVersionFile) - throws InconsistentStorageStateException { - // If the clusterID is null (not set), assign it the value - // from version file. - if (this.clusterID == null) { - this.clusterID = idFromVersionFile; - return; - } - - // If the clusterID is already set, it should match with the value from the - // version file. - if (!idFromVersionFile.equals(this.clusterID)) { - throw new InconsistentStorageStateException( - "Mismatched ClusterIDs. VolumeSet has: " + this.clusterID + - ", and version file has: " + idFromVersionFile); - } - } - - /** - * Acquire Volume Set Read lock. - */ - public void readLock() { - volumeSetRWLock.readLock().lock(); - } - - /** - * Release Volume Set Read lock. - */ - public void readUnlock() { - volumeSetRWLock.readLock().unlock(); - } - - /** - * Acquire Volume Set Write lock. - */ - public void writeLock() { - volumeSetRWLock.writeLock().lock(); - } - - /** - * Release Volume Set Write lock. 
- */ - public void writeUnlock() { - volumeSetRWLock.writeLock().unlock(); - } - - - private HddsVolume createVolume(String locationString, - StorageType storageType) throws IOException { - HddsVolume.Builder volumeBuilder = new HddsVolume.Builder(locationString) - .conf(conf) - .datanodeUuid(datanodeUuid) - .clusterID(clusterID) - .storageType(storageType); - return volumeBuilder.build(); - } - - - // Add a volume to VolumeSet - boolean addVolume(String dataDir) { - return addVolume(dataDir, StorageType.DEFAULT); - } - - // Add a volume to VolumeSet - private boolean addVolume(String volumeRoot, StorageType storageType) { - String hddsRoot = HddsVolumeUtil.getHddsRoot(volumeRoot); - boolean success; - - this.writeLock(); - try { - if (volumeMap.containsKey(hddsRoot)) { - LOG.warn("Volume : {} already exists in VolumeMap", hddsRoot); - success = false; - } else { - if (failedVolumeMap.containsKey(hddsRoot)) { - failedVolumeMap.remove(hddsRoot); - } - - HddsVolume hddsVolume = createVolume(volumeRoot, storageType); - volumeMap.put(hddsVolume.getHddsRootDir().getPath(), hddsVolume); - volumeStateMap.get(hddsVolume.getStorageType()).add(hddsVolume); - - LOG.info("Added Volume : {} to VolumeSet", - hddsVolume.getHddsRootDir().getPath()); - success = true; - } - } catch (IOException ex) { - LOG.error("Failed to add volume " + volumeRoot + " to VolumeSet", ex); - success = false; - } finally { - this.writeUnlock(); - } - return success; - } - - // Mark a volume as failed - public void failVolume(String dataDir) { - String hddsRoot = HddsVolumeUtil.getHddsRoot(dataDir); - - this.writeLock(); - try { - if (volumeMap.containsKey(hddsRoot)) { - HddsVolume hddsVolume = volumeMap.get(hddsRoot); - hddsVolume.failVolume(); - - volumeMap.remove(hddsRoot); - volumeStateMap.get(hddsVolume.getStorageType()).remove(hddsVolume); - failedVolumeMap.put(hddsRoot, hddsVolume); - - LOG.info("Moving Volume : {} to failed Volumes", hddsRoot); - } else if (failedVolumeMap.containsKey(hddsRoot)) { - LOG.info("Volume : {} is not active", hddsRoot); - } else { - LOG.warn("Volume : {} does not exist in VolumeSet", hddsRoot); - } - } finally { - this.writeUnlock(); - } - } - - // Remove a volume from the VolumeSet completely. - public void removeVolume(String dataDir) throws IOException { - String hddsRoot = HddsVolumeUtil.getHddsRoot(dataDir); - - this.writeLock(); - try { - if (volumeMap.containsKey(hddsRoot)) { - HddsVolume hddsVolume = volumeMap.get(hddsRoot); - hddsVolume.shutdown(); - - volumeMap.remove(hddsRoot); - volumeStateMap.get(hddsVolume.getStorageType()).remove(hddsVolume); - - LOG.info("Removed Volume : {} from VolumeSet", hddsRoot); - } else if (failedVolumeMap.containsKey(hddsRoot)) { - HddsVolume hddsVolume = failedVolumeMap.get(hddsRoot); - hddsVolume.setState(VolumeState.NON_EXISTENT); - - failedVolumeMap.remove(hddsRoot); - LOG.info("Removed Volume : {} from failed VolumeSet", hddsRoot); - } else { - LOG.warn("Volume : {} does not exist in VolumeSet", hddsRoot); - } - } finally { - this.writeUnlock(); - } - } - - /** - * This method, call shutdown on each volume to shutdown volume usage - * thread and write scmUsed on each volume. - */ - private void saveVolumeSetUsed() { - for (HddsVolume hddsVolume : volumeMap.values()) { - try { - hddsVolume.shutdown(); - } catch (Exception ex) { - LOG.error("Failed to shutdown volume : " + hddsVolume.getHddsRootDir(), - ex); - } - } - } - - /** - * Shutdown the volumeset. 
- */ - public void shutdown() { - saveVolumeSetUsed(); - stopDiskChecker(); - if (shutdownHook != null) { - ShutdownHookManager.get().removeShutdownHook(shutdownHook); - } - } - - private void stopDiskChecker() { - periodicDiskChecker.cancel(true); - volumeChecker.shutdownAndWait(0, TimeUnit.SECONDS); - diskCheckerservice.shutdownNow(); - } - - @VisibleForTesting - public List getVolumesList() { - return ImmutableList.copyOf(volumeMap.values()); - } - - @VisibleForTesting - public List getFailedVolumesList() { - return ImmutableList.copyOf(failedVolumeMap.values()); - } - - @VisibleForTesting - public Map getVolumeMap() { - return ImmutableMap.copyOf(volumeMap); - } - - @VisibleForTesting - public Map> getVolumeStateMap() { - return ImmutableMap.copyOf(volumeStateMap); - } - - public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() - throws IOException { - boolean failed; - this.readLock(); - try { - StorageLocationReport[] reports = new StorageLocationReport[volumeMap - .size() + failedVolumeMap.size()]; - int counter = 0; - HddsVolume hddsVolume; - for (Map.Entry entry : volumeMap.entrySet()) { - hddsVolume = entry.getValue(); - VolumeInfo volumeInfo = hddsVolume.getVolumeInfo(); - long scmUsed; - long remaining; - long capacity; - failed = false; - try { - scmUsed = volumeInfo.getScmUsed(); - remaining = volumeInfo.getAvailable(); - capacity = volumeInfo.getCapacity(); - } catch (IOException ex) { - LOG.warn("Failed to get scmUsed and remaining for container " + - "storage location {}", volumeInfo.getRootDir(), ex); - // reset scmUsed and remaining if df/du failed. - scmUsed = 0; - remaining = 0; - capacity = 0; - failed = true; - } - - StorageLocationReport.Builder builder = - StorageLocationReport.newBuilder(); - builder.setStorageLocation(volumeInfo.getRootDir()) - .setId(hddsVolume.getStorageID()) - .setFailed(failed) - .setCapacity(capacity) - .setRemaining(remaining) - .setScmUsed(scmUsed) - .setStorageType(hddsVolume.getStorageType()); - StorageLocationReport r = builder.build(); - reports[counter++] = r; - } - for (Map.Entry entry : failedVolumeMap.entrySet()) { - hddsVolume = entry.getValue(); - StorageLocationReport.Builder builder = StorageLocationReport - .newBuilder(); - builder.setStorageLocation(hddsVolume.getHddsRootDir() - .getAbsolutePath()).setId(hddsVolume.getStorageID()).setFailed(true) - .setCapacity(0).setRemaining(0).setScmUsed(0).setStorageType( - hddsVolume.getStorageType()); - StorageLocationReport r = builder.build(); - reports[counter++] = r; - } - NodeReportProto.Builder nrb = NodeReportProto.newBuilder(); - for (int i = 0; i < reports.length; i++) { - nrb.addStorageReport(reports[i].getProtoBufMessage()); - } - return nrb.build(); - } finally { - this.readUnlock(); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java deleted file mode 100644 index 693bcb50cc5de..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CachingGetSpaceUsed; -import org.apache.hadoop.fs.DF; -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.nio.charset.StandardCharsets; -import java.util.Scanner; -import java.util.concurrent.atomic.AtomicReference; - -/** - * Class that wraps the space df of the Datanode Volumes used by SCM - * containers. - */ -public class VolumeUsage { - private static final Logger LOG = LoggerFactory.getLogger(VolumeUsage.class); - - private final File rootDir; - private final DF df; - private final File scmUsedFile; - private AtomicReference scmUsage; - private boolean shutdownComplete; - - private static final String DU_CACHE_FILE = "scmUsed"; - private volatile boolean scmUsedSaved = false; - - VolumeUsage(File dataLoc, Configuration conf) - throws IOException { - this.rootDir = dataLoc; - - // SCM used cache file - scmUsedFile = new File(rootDir, DU_CACHE_FILE); - // get overall disk df - this.df = new DF(rootDir, conf); - - startScmUsageThread(conf); - } - - void startScmUsageThread(Configuration conf) throws IOException { - // get SCM specific df - scmUsage = new AtomicReference<>( - new CachingGetSpaceUsed.Builder().setPath(rootDir) - .setConf(conf) - .setInitialUsed(loadScmUsed()) - .build()); - } - - long getCapacity() { - long capacity = df.getCapacity(); - return (capacity > 0) ? capacity : 0; - } - - /* - * Calculate the available space in the volume. - */ - long getAvailable() throws IOException { - long remaining = getCapacity() - getScmUsed(); - long available = df.getAvailable(); - if (remaining > available) { - remaining = available; - } - return (remaining > 0) ? remaining : 0; - } - - long getScmUsed() throws IOException{ - return scmUsage.get().getUsed(); - } - - public synchronized void shutdown() { - if (!shutdownComplete) { - saveScmUsed(); - - if (scmUsage.get() instanceof CachingGetSpaceUsed) { - IOUtils.cleanupWithLogger( - null, ((CachingGetSpaceUsed) scmUsage.get())); - } - shutdownComplete = true; - } - } - - /** - * Read in the cached DU value and return it if it is less than 600 seconds - * old (DU update interval). Slight imprecision of scmUsed is not critical - * and skipping DU can significantly shorten the startup time. - * If the cached value is not available or too old, -1 is returned. 
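The cache file described above holds two longs, the recorded scmUsed followed by the time it was written, and the value is trusted only while that timestamp is under 600 seconds old. A self-contained sketch of that check (using System.currentTimeMillis() where the removed code uses Hadoop's Time.now()):

import java.io.File;
import java.io.FileNotFoundException;
import java.util.Scanner;

final class ScmUsedCacheSketch {
  static long loadCachedUsed(File scmUsedFile) {
    try (Scanner sc = new Scanner(scmUsedFile, "UTF-8")) {
      if (!sc.hasNextLong()) {
        return -1;                      // no usage value recorded
      }
      long cachedUsed = sc.nextLong();
      if (!sc.hasNextLong()) {
        return -1;                      // mtime missing, value can't be trusted
      }
      long mtime = sc.nextLong();
      boolean fresh = mtime > 0
          && System.currentTimeMillis() - mtime < 600_000L;
      return fresh ? cachedUsed : -1;   // -1 forces a fresh du/df scan
    } catch (FileNotFoundException e) {
      return -1;                        // cache file not written yet
    }
  }
}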
- */ - long loadScmUsed() { - long cachedScmUsed; - long mtime; - Scanner sc; - - try { - sc = new Scanner(scmUsedFile, "UTF-8"); - } catch (FileNotFoundException fnfe) { - return -1; - } - - try { - // Get the recorded scmUsed from the file. - if (sc.hasNextLong()) { - cachedScmUsed = sc.nextLong(); - } else { - return -1; - } - // Get the recorded mtime from the file. - if (sc.hasNextLong()) { - mtime = sc.nextLong(); - } else { - return -1; - } - - // Return the cached value if mtime is okay. - if (mtime > 0 && (Time.now() - mtime < 600000L)) { - LOG.info("Cached ScmUsed found for {} : {} ", rootDir, - cachedScmUsed); - return cachedScmUsed; - } - return -1; - } finally { - sc.close(); - } - } - - /** - * Write the current scmUsed to the cache file. - */ - void saveScmUsed() { - if (scmUsedFile.exists() && !scmUsedFile.delete()) { - LOG.warn("Failed to delete old scmUsed file in {}.", rootDir); - } - OutputStreamWriter out = null; - try { - long used = getScmUsed(); - if (used > 0) { - out = new OutputStreamWriter(new FileOutputStream(scmUsedFile), - StandardCharsets.UTF_8); - // mtime is written last, so that truncated writes won't be valid. - out.write(Long.toString(used) + " " + Long.toString(Time.now())); - out.flush(); - out.close(); - out = null; - } - } catch (IOException ioe) { - // If write failed, the volume might be bad. Since the cache file is - // not critical, log the error and continue. - LOG.warn("Failed to write scmUsed to " + scmUsedFile, ioe); - } finally { - IOUtils.cleanupWithLogger(null, out); - } - } - - /** - * Only for testing. Do not use otherwise. - */ - @VisibleForTesting - @SuppressFBWarnings( - value = "IS2_INCONSISTENT_SYNC", - justification = "scmUsage is an AtomicReference. No additional " + - "synchronization is needed.") - public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) { - scmUsage.set(scmUsageForTest); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java deleted file mode 100644 index 86093c6015c83..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; -/** - This package contains volume/ disk related classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java deleted file mode 100644 index ad68c4dc96cd8..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; -import org.apache.hadoop.hdds.utils.MetaStoreIterator; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.apache.hadoop.hdds.utils.MetadataStore.KeyValue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.File; -import java.io.IOException; -import java.util.NoSuchElementException; - - -/** - * Block Iterator for KeyValue Container. This block iterator returns blocks - * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. 
If no - * filter is specified, then default filter used is - * {@link MetadataKeyFilters#getNormalKeyFilter()} - */ -@InterfaceAudience.Public -public class KeyValueBlockIterator implements BlockIterator, - Closeable { - - private static final Logger LOG = LoggerFactory.getLogger( - KeyValueBlockIterator.class); - - private MetaStoreIterator blockIterator; - private final ReferenceCountedDB db; - private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters - .getNormalKeyFilter(); - private KeyPrefixFilter blockFilter; - private BlockData nextBlock; - private long containerId; - - /** - * KeyValueBlockIterator to iterate blocks in a container. - * @param id - container id - * @param path - container base path - * @throws IOException - */ - - public KeyValueBlockIterator(long id, File path) - throws IOException { - this(id, path, defaultBlockFilter); - } - - /** - * KeyValueBlockIterator to iterate blocks in a container. - * @param id - container id - * @param path - container base path - * @param filter - Block filter, filter to be applied for blocks - * @throws IOException - */ - public KeyValueBlockIterator(long id, File path, KeyPrefixFilter filter) - throws IOException { - containerId = id; - File metdataPath = new File(path, OzoneConsts.METADATA); - File containerFile = ContainerUtils.getContainerFile(metdataPath - .getParentFile()); - ContainerData containerData = ContainerDataYaml.readContainerFile( - containerFile); - KeyValueContainerData keyValueContainerData = (KeyValueContainerData) - containerData; - keyValueContainerData.setDbFile(KeyValueContainerLocationUtil - .getContainerDBFile(metdataPath, containerId)); - db = BlockUtils.getDB(keyValueContainerData, new - OzoneConfiguration()); - blockIterator = db.getStore().iterator(); - blockFilter = filter; - } - - /** - * This method returns blocks matching with the filter. 
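Taken together, the contract documented here is the usual iterator-plus-Closeable shape. A usage sketch, assuming the container id and base path are already known (the printed output is illustrative only):

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;

final class BlockIterationSketch {
  static void listBlocks(long containerId, File containerBasePath)
      throws IOException {
    // try-with-resources releases the underlying DB handle via close().
    try (KeyValueBlockIterator iter =
             new KeyValueBlockIterator(containerId, containerBasePath)) {
      while (iter.hasNext()) {
        BlockData block = iter.nextBlock();
        System.out.println("found block " + block.getLocalID());
      }
    }
  }
}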
- * @return next block or null if no more blocks - * @throws IOException - */ - @Override - public BlockData nextBlock() throws IOException, NoSuchElementException { - if (nextBlock != null) { - BlockData currentBlock = nextBlock; - nextBlock = null; - return currentBlock; - } - if(hasNext()) { - return nextBlock(); - } - throw new NoSuchElementException("Block Iterator reached end for " + - "ContainerID " + containerId); - } - - @Override - public boolean hasNext() throws IOException { - if (nextBlock != null) { - return true; - } - if (blockIterator.hasNext()) { - KeyValue block = blockIterator.next(); - if (blockFilter.filterKey(null, block.getKey(), null)) { - nextBlock = BlockUtils.getBlockData(block.getValue()); - if (LOG.isTraceEnabled()) { - LOG.trace("Block matching with filter found: blockID is : {} for " + - "containerID {}", nextBlock.getLocalID(), containerId); - } - return true; - } - hasNext(); - } - return false; - } - - @Override - public void seekToFirst() { - nextBlock = null; - blockIterator.seekToFirst(); - } - - @Override - public void seekToLast() { - nextBlock = null; - blockIterator.seekToLast(); - } - - public void close() { - db.close(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java deleted file mode 100644 index a6e914b90b83d..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ /dev/null @@ -1,730 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.StandardCopyOption; -import java.util.Map; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers - .KeyValueContainerLocationUtil; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; - -import com.google.common.base.Preconditions; -import org.apache.commons.io.FileUtils; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_ALREADY_EXISTS; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_FILES_CREATE_ERROR; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_INTERNAL_ERROR; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_OPEN; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.DISK_OUT_OF_SPACE; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.ERROR_IN_COMPACT_DB; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.ERROR_IN_DB_SYNC; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.INVALID_CONTAINER_STATE; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.UNSUPPORTED_REQUEST; - -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class to perform KeyValue Container operations. Any modifications to - * KeyValueContainer object should ideally be done via api exposed in - * KeyValueHandler class. 
- */ -public class KeyValueContainer implements Container { - - private static final Logger LOG = LoggerFactory.getLogger(Container.class); - - // Use a non-fair RW lock for better throughput, we may revisit this decision - // if this causes fairness issues. - private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - - private final KeyValueContainerData containerData; - private Configuration config; - - public KeyValueContainer(KeyValueContainerData containerData, Configuration - ozoneConfig) { - Preconditions.checkNotNull(containerData, "KeyValueContainerData cannot " + - "be null"); - Preconditions.checkNotNull(ozoneConfig, "Ozone configuration cannot " + - "be null"); - this.config = ozoneConfig; - this.containerData = containerData; - } - - @Override - public void create(VolumeSet volumeSet, VolumeChoosingPolicy - volumeChoosingPolicy, String scmId) throws StorageContainerException { - Preconditions.checkNotNull(volumeChoosingPolicy, "VolumeChoosingPolicy " + - "cannot be null"); - Preconditions.checkNotNull(volumeSet, "VolumeSet cannot be null"); - Preconditions.checkNotNull(scmId, "scmId cannot be null"); - - File containerMetaDataPath = null; - //acquiring volumeset read lock - long maxSize = containerData.getMaxSize(); - volumeSet.readLock(); - try { - HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet - .getVolumesList(), maxSize); - String hddsVolumeDir = containerVolume.getHddsRootDir().toString(); - - long containerID = containerData.getContainerID(); - - containerMetaDataPath = KeyValueContainerLocationUtil - .getContainerMetaDataPath(hddsVolumeDir, scmId, containerID); - containerData.setMetadataPath(containerMetaDataPath.getPath()); - - File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath( - hddsVolumeDir, scmId, containerID); - - // Check if it is new Container. - ContainerUtils.verifyIsNewContainer(containerMetaDataPath); - - //Create Metadata path chunks path and metadata db - File dbFile = getContainerDBFile(); - KeyValueContainerUtil.createContainerMetaData(containerMetaDataPath, - chunksPath, dbFile, config); - - String impl = config.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT); - - //Set containerData for the KeyValueContainer. 
- containerData.setChunksPath(chunksPath.getPath()); - containerData.setContainerDBType(impl); - containerData.setDbFile(dbFile); - containerData.setVolume(containerVolume); - - // Create .container file - File containerFile = getContainerFile(); - createContainerFile(containerFile); - - } catch (StorageContainerException ex) { - if (containerMetaDataPath != null && containerMetaDataPath.getParentFile() - .exists()) { - FileUtil.fullyDelete(containerMetaDataPath.getParentFile()); - } - throw ex; - } catch (DiskOutOfSpaceException ex) { - throw new StorageContainerException("Container creation failed, due to " + - "disk out of space", ex, DISK_OUT_OF_SPACE); - } catch (FileAlreadyExistsException ex) { - throw new StorageContainerException("Container creation failed because " + - "ContainerFile already exists", ex, CONTAINER_ALREADY_EXISTS); - } catch (IOException ex) { - if (containerMetaDataPath != null && containerMetaDataPath.getParentFile() - .exists()) { - FileUtil.fullyDelete(containerMetaDataPath.getParentFile()); - } - throw new StorageContainerException("Container creation failed.", ex, - CONTAINER_INTERNAL_ERROR); - } finally { - volumeSet.readUnlock(); - } - } - - /** - * Set all of the path realted container data fields based on the name - * conventions. - * - * @param scmId - * @param containerVolume - * @param hddsVolumeDir - */ - public void populatePathFields(String scmId, - HddsVolume containerVolume, String hddsVolumeDir) { - - long containerId = containerData.getContainerID(); - - File containerMetaDataPath = KeyValueContainerLocationUtil - .getContainerMetaDataPath(hddsVolumeDir, scmId, containerId); - - File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath( - hddsVolumeDir, scmId, containerId); - File dbFile = KeyValueContainerLocationUtil.getContainerDBFile( - containerMetaDataPath, containerId); - - //Set containerData for the KeyValueContainer. - containerData.setMetadataPath(containerMetaDataPath.getPath()); - containerData.setChunksPath(chunksPath.getPath()); - containerData.setDbFile(dbFile); - containerData.setVolume(containerVolume); - } - - /** - * Writes to .container file. - * - * @param containerFile container file name - * @param isCreate True if creating a new file. False is updating an - * existing container file. - * @throws StorageContainerException - */ - private void writeToContainerFile(File containerFile, boolean isCreate) - throws StorageContainerException { - File tempContainerFile = null; - long containerId = containerData.getContainerID(); - try { - tempContainerFile = createTempFile(containerFile); - ContainerDataYaml.createContainerFile( - ContainerType.KeyValueContainer, containerData, tempContainerFile); - - // NativeIO.renameTo is an atomic function. But it might fail if the - // container file already exists. Hence, we handle the two cases - // separately. - if (isCreate) { - NativeIO.renameTo(tempContainerFile, containerFile); - } else { - Files.move(tempContainerFile.toPath(), containerFile.toPath(), - StandardCopyOption.REPLACE_EXISTING); - } - - } catch (IOException ex) { - throw new StorageContainerException("Error while creating/ updating " + - ".container file. 
ContainerID: " + containerId, ex, - CONTAINER_FILES_CREATE_ERROR); - } finally { - if (tempContainerFile != null && tempContainerFile.exists()) { - if (!tempContainerFile.delete()) { - LOG.warn("Unable to delete container temporary file: {}.", - tempContainerFile.getAbsolutePath()); - } - } - } - } - - private void createContainerFile(File containerFile) - throws StorageContainerException { - writeToContainerFile(containerFile, true); - } - - private void updateContainerFile(File containerFile) - throws StorageContainerException { - writeToContainerFile(containerFile, false); - } - - - @Override - public void delete() throws StorageContainerException { - long containerId = containerData.getContainerID(); - try { - KeyValueContainerUtil.removeContainer(containerData, config); - } catch (StorageContainerException ex) { - throw ex; - } catch (IOException ex) { - // TODO : An I/O error during delete can leave partial artifacts on the - // disk. We will need the cleaner thread to cleanup this information. - String errMsg = String.format("Failed to cleanup container. ID: %d", - containerId); - LOG.error(errMsg, ex); - throw new StorageContainerException(errMsg, ex, CONTAINER_INTERNAL_ERROR); - } - } - - @Override - public void markContainerForClose() throws StorageContainerException { - writeLock(); - try { - if (getContainerState() != ContainerDataProto.State.OPEN) { - throw new StorageContainerException( - "Attempting to close a " + getContainerState() + " container.", - CONTAINER_NOT_OPEN); - } - updateContainerData(() -> - containerData.setState(ContainerDataProto.State.CLOSING)); - } finally { - writeUnlock(); - } - } - - @Override - public void markContainerUnhealthy() throws StorageContainerException { - writeLock(); - try { - updateContainerData(() -> - containerData.setState(ContainerDataProto.State.UNHEALTHY)); - } finally { - writeUnlock(); - } - } - - @Override - public void quasiClose() throws StorageContainerException { - // The DB must be synced during close operation - flushAndSyncDB(); - - writeLock(); - try { - // Second sync should be a very light operation as sync has already - // been done outside the lock. - flushAndSyncDB(); - updateContainerData(containerData::quasiCloseContainer); - } finally { - writeUnlock(); - } - } - - @Override - public void close() throws StorageContainerException { - // The DB must be synced during close operation - flushAndSyncDB(); - - writeLock(); - try { - // Second sync should be a very light operation as sync has already - // been done outside the lock. - flushAndSyncDB(); - updateContainerData(containerData::closeContainer); - } finally { - writeUnlock(); - } - LOG.info("Container {} is closed with bcsId {}.", - containerData.getContainerID(), - containerData.getBlockCommitSequenceId()); - } - - /** - * - * Must be invoked with the writeLock held. - * - * @param update - * @throws StorageContainerException - */ - private void updateContainerData(Runnable update) - throws StorageContainerException { - Preconditions.checkState(hasWriteLock()); - ContainerDataProto.State oldState = null; - try { - oldState = containerData.getState(); - update.run(); - File containerFile = getContainerFile(); - // update the new container data to .container File - updateContainerFile(containerFile); - - } catch (StorageContainerException ex) { - if (oldState != null - && containerData.getState() != ContainerDataProto.State.UNHEALTHY) { - // Failed to update .container file. Reset the state to old state only - // if the current state is not unhealthy. 
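writeToContainerFile above always writes to a temporary file first and then moves it into place, so a crash never leaves a half-written .container file. A JDK-only sketch of the same idea (the removed code uses NativeIO.renameTo on the create path and Files.move with REPLACE_EXISTING on updates; the helper below is illustrative):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

final class AtomicFileUpdateSketch {
  static void writeAtomically(Path target, String content) throws IOException {
    // Stage the full contents in a sibling temp file first.
    Path tmp = Files.createTempFile(target.toAbsolutePath().getParent(),
        "tmp_", target.getFileName().toString());
    try {
      Files.write(tmp, content.getBytes(StandardCharsets.UTF_8));
      // Publish the new contents in a single rename.
      Files.move(tmp, target, StandardCopyOption.REPLACE_EXISTING);
    } finally {
      Files.deleteIfExists(tmp);        // no-op once the move has succeeded
    }
  }
}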
- containerData.setState(oldState); - } - throw ex; - } - } - - private void compactDB() throws StorageContainerException { - try { - try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { - db.getStore().compactDB(); - } - } catch (StorageContainerException ex) { - throw ex; - } catch (IOException ex) { - LOG.error("Error in DB compaction while closing container", ex); - throw new StorageContainerException(ex, ERROR_IN_COMPACT_DB); - } - } - - private void flushAndSyncDB() throws StorageContainerException { - try { - try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { - db.getStore().flushDB(true); - LOG.info("Container {} is synced with bcsId {}.", - containerData.getContainerID(), - containerData.getBlockCommitSequenceId()); - } - } catch (StorageContainerException ex) { - throw ex; - } catch (IOException ex) { - LOG.error("Error in DB sync while closing container", ex); - throw new StorageContainerException(ex, ERROR_IN_DB_SYNC); - } - } - - @Override - public KeyValueContainerData getContainerData() { - return containerData; - } - - @Override - public ContainerProtos.ContainerDataProto.State getContainerState() { - return containerData.getState(); - } - - @Override - public ContainerType getContainerType() { - return ContainerType.KeyValueContainer; - } - - @Override - public void update( - Map metadata, boolean forceUpdate) - throws StorageContainerException { - - // TODO: Now, when writing the updated data to .container file, we are - // holding lock and writing data to disk. We can have async implementation - // to flush the update container data to disk. - long containerId = containerData.getContainerID(); - if(!containerData.isValid()) { - LOG.debug("Invalid container data. ContainerID: {}", containerId); - throw new StorageContainerException("Invalid container data. " + - "ContainerID: " + containerId, INVALID_CONTAINER_STATE); - } - if (!forceUpdate && !containerData.isOpen()) { - throw new StorageContainerException( - "Updating a closed container without force option is not allowed. " + - "ContainerID: " + containerId, UNSUPPORTED_REQUEST); - } - - Map oldMetadata = containerData.getMetadata(); - try { - writeLock(); - for (Map.Entry entry : metadata.entrySet()) { - containerData.addMetadata(entry.getKey(), entry.getValue()); - } - - File containerFile = getContainerFile(); - // update the new container data to .container File - updateContainerFile(containerFile); - } catch (StorageContainerException ex) { - containerData.setMetadata(oldMetadata); - throw ex; - } finally { - writeUnlock(); - } - } - - @Override - public void updateDeleteTransactionId(long deleteTransactionId) { - containerData.updateDeleteTransactionId(deleteTransactionId); - } - - @Override - public KeyValueBlockIterator blockIterator() throws IOException{ - return new KeyValueBlockIterator(containerData.getContainerID(), new File( - containerData.getContainerPath())); - } - - @Override - public void importContainerData(InputStream input, - ContainerPacker packer) throws IOException { - writeLock(); - try { - if (getContainerFile().exists()) { - String errorMessage = String.format( - "Can't import container (cid=%d) data to a specific location" - + " as the container descriptor (%s) has already been exist.", - getContainerData().getContainerID(), - getContainerFile().getAbsolutePath()); - throw new IOException(errorMessage); - } - //copy the values from the input stream to the final destination - // directory. 
- byte[] descriptorContent = packer.unpackContainerData(this, input); - - Preconditions.checkNotNull(descriptorContent, - "Container descriptor is missing from the container archive: " - + getContainerData().getContainerID()); - - //now, we have extracted the container descriptor from the previous - //datanode. We can load it and upload it with the current data - // (original metadata + current filepath fields) - KeyValueContainerData originalContainerData = - (KeyValueContainerData) ContainerDataYaml - .readContainer(descriptorContent); - - - containerData.setState(originalContainerData.getState()); - containerData - .setContainerDBType(originalContainerData.getContainerDBType()); - containerData.setBytesUsed(originalContainerData.getBytesUsed()); - - //rewriting the yaml file with new checksum calculation. - update(originalContainerData.getMetadata(), true); - - //fill in memory stat counter (keycount, byte usage) - KeyValueContainerUtil.parseKVContainerData(containerData, config); - - } catch (Exception ex) { - //delete all the temporary data in case of any exception. - try { - FileUtils.deleteDirectory(new File(containerData.getMetadataPath())); - FileUtils.deleteDirectory(new File(containerData.getChunksPath())); - FileUtils.deleteDirectory(getContainerFile()); - } catch (Exception deleteex) { - LOG.error( - "Can not cleanup destination directories after a container import" - + " error (cid" + - containerData.getContainerID() + ")", deleteex); - } - throw ex; - } finally { - writeUnlock(); - } - } - - @Override - public void exportContainerData(OutputStream destination, - ContainerPacker packer) throws IOException { - if (getContainerData().getState() != - ContainerProtos.ContainerDataProto.State.CLOSED) { - throw new IllegalStateException( - "Only closed containers could be exported: ContainerId=" - + getContainerData().getContainerID()); - } - compactDB(); - packer.pack(this, destination); - } - - /** - * Acquire read lock. - */ - public void readLock() { - this.lock.readLock().lock(); - - } - - /** - * Release read lock. - */ - public void readUnlock() { - this.lock.readLock().unlock(); - } - - /** - * Check if the current thread holds read lock. - */ - public boolean hasReadLock() { - return this.lock.readLock().tryLock(); - } - - /** - * Acquire write lock. - */ - public void writeLock() { - // TODO: The lock for KeyValueContainer object should not be exposed - // publicly. - this.lock.writeLock().lock(); - } - - /** - * Release write lock. - */ - public void writeUnlock() { - this.lock.writeLock().unlock(); - - } - - /** - * Check if the current thread holds write lock. - */ - public boolean hasWriteLock() { - return this.lock.writeLock().isHeldByCurrentThread(); - } - - /** - * Acquire read lock, unless interrupted while waiting. - * @throws InterruptedException - */ - @Override - public void readLockInterruptibly() throws InterruptedException { - this.lock.readLock().lockInterruptibly(); - } - - /** - * Acquire write lock, unless interrupted while waiting. - * @throws InterruptedException - */ - @Override - public void writeLockInterruptibly() throws InterruptedException { - this.lock.writeLock().lockInterruptibly(); - - } - - /** - * Returns containerFile. 
- * @return .container File name - */ - @Override - public File getContainerFile() { - return getContainerFile(containerData.getMetadataPath(), - containerData.getContainerID()); - } - - static File getContainerFile(String metadataPath, long containerId) { - return new File(metadataPath, - containerId + OzoneConsts.CONTAINER_EXTENSION); - } - - @Override - public void updateBlockCommitSequenceId(long blockCommitSequenceId) { - containerData.updateBlockCommitSequenceId(blockCommitSequenceId); - } - - @Override - public long getBlockCommitSequenceId() { - return containerData.getBlockCommitSequenceId(); - } - - - /** - * Returns KeyValueContainerReport for the KeyValueContainer. - */ - @Override - public ContainerReplicaProto getContainerReport() - throws StorageContainerException { - ContainerReplicaProto.Builder ciBuilder = - ContainerReplicaProto.newBuilder(); - ciBuilder.setContainerID(containerData.getContainerID()) - .setReadCount(containerData.getReadCount()) - .setWriteCount(containerData.getWriteCount()) - .setReadBytes(containerData.getReadBytes()) - .setWriteBytes(containerData.getWriteBytes()) - .setKeyCount(containerData.getKeyCount()) - .setUsed(containerData.getBytesUsed()) - .setState(getHddsState()) - .setDeleteTransactionId(containerData.getDeleteTransactionId()) - .setBlockCommitSequenceId(containerData.getBlockCommitSequenceId()) - .setOriginNodeId(containerData.getOriginNodeId()); - return ciBuilder.build(); - } - - /** - * Returns LifeCycle State of the container. - * @return LifeCycle State of the container in HddsProtos format - * @throws StorageContainerException - */ - private ContainerReplicaProto.State getHddsState() - throws StorageContainerException { - ContainerReplicaProto.State state; - switch (containerData.getState()) { - case OPEN: - state = ContainerReplicaProto.State.OPEN; - break; - case CLOSING: - state = ContainerReplicaProto.State.CLOSING; - break; - case QUASI_CLOSED: - state = ContainerReplicaProto.State.QUASI_CLOSED; - break; - case CLOSED: - state = ContainerReplicaProto.State.CLOSED; - break; - case UNHEALTHY: - state = ContainerReplicaProto.State.UNHEALTHY; - break; - default: - throw new StorageContainerException("Invalid Container state found: " + - containerData.getContainerID(), INVALID_CONTAINER_STATE); - } - return state; - } - - /** - * Returns container DB file. 
- * @return - */ - public File getContainerDBFile() { - return new File(containerData.getMetadataPath(), containerData - .getContainerID() + OzoneConsts.DN_CONTAINER_DB); - } - - public boolean scanMetaData() { - long containerId = containerData.getContainerID(); - KeyValueContainerCheck checker = - new KeyValueContainerCheck(containerData.getMetadataPath(), config, - containerId); - return checker.fastCheck(); - } - - @Override - public boolean shouldScanData() { - return containerData.getState() == ContainerDataProto.State.CLOSED - || containerData.getState() == ContainerDataProto.State.QUASI_CLOSED; - } - - public boolean scanData(DataTransferThrottler throttler, Canceler canceler) { - if (!shouldScanData()) { - throw new IllegalStateException("The checksum verification can not be" + - " done for container in state " - + containerData.getState()); - } - - long containerId = containerData.getContainerID(); - KeyValueContainerCheck checker = - new KeyValueContainerCheck(containerData.getMetadataPath(), config, - containerId); - - return checker.fullCheck(throttler, canceler); - } - - private enum ContainerCheckLevel { - NO_CHECK, FAST_CHECK, FULL_CHECK - } - - /** - * Creates a temporary file. - * @param file - * @return - * @throws IOException - */ - private File createTempFile(File file) throws IOException{ - return File.createTempFile("tmp_" + System.currentTimeMillis() + "_", - file.getName(), file.getParentFile()); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java deleted file mode 100644 index a4bd37623113f..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.Arrays; - -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; - -/** - * Class to run integrity checks on Datanode Containers. - * Provide infra for Data Scrubbing - */ - -public class KeyValueContainerCheck { - - private static final Logger LOG = LoggerFactory.getLogger(Container.class); - - private long containerID; - private KeyValueContainerData onDiskContainerData; //loaded from fs/disk - private Configuration checkConfig; - - private String metadataPath; - - public KeyValueContainerCheck(String metadataPath, Configuration conf, - long containerID) { - Preconditions.checkArgument(metadataPath != null); - - this.checkConfig = conf; - this.containerID = containerID; - this.onDiskContainerData = null; - this.metadataPath = metadataPath; - } - - /** - * Run basic integrity checks on container metadata. - * These checks do not look inside the metadata files. - * Applicable for OPEN containers. - * - * @return true : integrity checks pass, false : otherwise. - */ - public boolean fastCheck() { - LOG.info("Running basic checks for container {};", containerID); - boolean valid = false; - try { - loadContainerData(); - checkLayout(); - checkContainerFile(); - valid = true; - - } catch (IOException e) { - handleCorruption(e); - } - - return valid; - } - - /** - * full checks comprise scanning all metadata inside the container. - * Including the KV database. These checks are intrusive, consume more - * resources compared to fast checks and should only be done on Closed - * or Quasi-closed Containers. Concurrency being limited to delete - * workflows. - *
- * fullCheck is a superset of fastCheck - * - * @return true : integrity checks pass, false : otherwise. - */ - public boolean fullCheck(DataTransferThrottler throttler, Canceler canceler) { - boolean valid; - - try { - valid = fastCheck(); - if (valid) { - scanData(throttler, canceler); - } - } catch (IOException e) { - handleCorruption(e); - valid = false; - } - - return valid; - } - - /** - * Check the integrity of the directory structure of the container. - */ - private void checkLayout() throws IOException { - - // is metadataPath accessible as a directory? - checkDirPath(metadataPath); - - // is chunksPath accessible as a directory? - String chunksPath = onDiskContainerData.getChunksPath(); - checkDirPath(chunksPath); - } - - private void checkDirPath(String path) throws IOException { - - File dirPath = new File(path); - String errStr; - - try { - if (!dirPath.isDirectory()) { - errStr = "Not a directory [" + path + "]"; - throw new IOException(errStr); - } - } catch (SecurityException se) { - throw new IOException("Security exception checking dir [" - + path + "]", se); - } - - String[] ls = dirPath.list(); - if (ls == null) { - // null result implies operation failed - errStr = "null listing for directory [" + path + "]"; - throw new IOException(errStr); - } - } - - private void checkContainerFile() throws IOException { - /* - * compare the values in the container file loaded from disk, - * with the values we are expecting - */ - String dbType; - Preconditions - .checkState(onDiskContainerData != null, "Container File not loaded"); - - ContainerUtils.verifyChecksum(onDiskContainerData); - - if (onDiskContainerData.getContainerType() - != ContainerProtos.ContainerType.KeyValueContainer) { - String errStr = "Bad Container type in Containerdata for " + containerID; - throw new IOException(errStr); - } - - if (onDiskContainerData.getContainerID() != containerID) { - String errStr = - "Bad ContainerID field in Containerdata for " + containerID; - throw new IOException(errStr); - } - - dbType = onDiskContainerData.getContainerDBType(); - if (!dbType.equals(OZONE_METADATA_STORE_IMPL_ROCKSDB) && - !dbType.equals(OZONE_METADATA_STORE_IMPL_LEVELDB)) { - String errStr = "Unknown DBType [" + dbType - + "] in Container File for [" + containerID + "]"; - throw new IOException(errStr); - } - - KeyValueContainerData kvData = onDiskContainerData; - if (!metadataPath.equals(kvData.getMetadataPath())) { - String errStr = - "Bad metadata path in Containerdata for " + containerID + "Expected [" - + metadataPath + "] Got [" + kvData.getMetadataPath() - + "]"; - throw new IOException(errStr); - } - } - - private void scanData(DataTransferThrottler throttler, Canceler canceler) - throws IOException { - /* - * Check the integrity of the DB inside each container. - * 1. iterate over each key (Block) and locate the chunks for the block - * 2. garbage detection (TBD): chunks which exist in the filesystem, - * but not in the DB. This function will be implemented in HDDS-1202 - * 3. chunk checksum verification. 
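Step 3 above amounts to re-reading each chunk in bytesPerChecksum-sized slices and comparing a freshly computed checksum per slice with the stored one. The sketch below uses CRC32 purely as a stand-in for the Ozone Checksum implementation; the expected values, parameter names and error handling are illustrative (the removed code also cross-checks the total bytes read against the chunk length):

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.zip.CRC32;

final class ChunkChecksumSketch {
  static void verify(File chunkFile, int bytesPerChecksum,
      List<Long> expectedCrcs) throws IOException {
    byte[] buffer = new byte[bytesPerChecksum];
    try (InputStream in = new FileInputStream(chunkFile)) {
      for (long expected : expectedCrcs) {
        int read = in.read(buffer);
        if (read == -1) {
          break;                        // file ended early
        }
        CRC32 crc = new CRC32();
        crc.update(buffer, 0, read);
        if (crc.getValue() != expected) {
          throw new IOException("Checksum mismatch in " + chunkFile);
        }
      }
    }
  }
}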
- */ - Preconditions.checkState(onDiskContainerData != null, - "invoke loadContainerData prior to calling this function"); - File dbFile; - File metaDir = new File(metadataPath); - - dbFile = KeyValueContainerLocationUtil - .getContainerDBFile(metaDir, containerID); - - if (!dbFile.exists() || !dbFile.canRead()) { - String dbFileErrorMsg = "Unable to access DB File [" + dbFile.toString() - + "] for Container [" + containerID + "] metadata path [" - + metadataPath + "]"; - throw new IOException(dbFileErrorMsg); - } - - onDiskContainerData.setDbFile(dbFile); - try(ReferenceCountedDB db = - BlockUtils.getDB(onDiskContainerData, checkConfig); - KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID, - new File(onDiskContainerData.getContainerPath()))) { - - while(kvIter.hasNext()) { - BlockData block = kvIter.nextBlock(); - for(ContainerProtos.ChunkInfo chunk : block.getChunks()) { - File chunkFile = ChunkUtils.getChunkFile(onDiskContainerData, - ChunkInfo.getFromProtoBuf(chunk)); - if (!chunkFile.exists()) { - // concurrent mutation in Block DB? lookup the block again. - byte[] bdata = db.getStore().get( - Longs.toByteArray(block.getBlockID().getLocalID())); - if (bdata != null) { - throw new IOException("Missing chunk file " - + chunkFile.getAbsolutePath()); - } - } else if (chunk.getChecksumData().getType() - != ContainerProtos.ChecksumType.NONE){ - int length = chunk.getChecksumData().getChecksumsList().size(); - ChecksumData cData = new ChecksumData( - chunk.getChecksumData().getType(), - chunk.getChecksumData().getBytesPerChecksum(), - chunk.getChecksumData().getChecksumsList()); - Checksum cal = new Checksum(cData.getChecksumType(), - cData.getBytesPerChecksum()); - long bytesRead = 0; - byte[] buffer = new byte[cData.getBytesPerChecksum()]; - try (InputStream fs = new FileInputStream(chunkFile)) { - for (int i = 0; i < length; i++) { - int v = fs.read(buffer); - if (v == -1) { - break; - } - bytesRead += v; - throttler.throttle(v, canceler); - ByteString expected = cData.getChecksums().get(i); - ByteString actual = cal.computeChecksum(buffer, 0, v) - .getChecksums().get(0); - if (!Arrays.equals(expected.toByteArray(), - actual.toByteArray())) { - throw new OzoneChecksumException(String - .format("Inconsistent read for chunk=%s len=%d expected" + - " checksum %s actual checksum %s for block %s", - chunk.getChunkName(), chunk.getLen(), - Arrays.toString(expected.toByteArray()), - Arrays.toString(actual.toByteArray()), - block.getBlockID())); - } - - } - if (bytesRead != chunk.getLen()) { - throw new OzoneChecksumException(String - .format("Inconsistent read for chunk=%s expected length=%d" - + " actual length=%d for block %s", - chunk.getChunkName(), - chunk.getLen(), bytesRead, block.getBlockID())); - } - } - } - } - } - } - } - - private void loadContainerData() throws IOException { - File containerFile = KeyValueContainer - .getContainerFile(metadataPath, containerID); - - onDiskContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - } - - private void handleCorruption(IOException e) { - String errStr = - "Corruption detected in container: [" + containerID + "] "; - String logMessage = errStr + "Exception: [" + e.getMessage() + "]"; - LOG.error(logMessage); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java 
deleted file mode 100644 index 2a9eedc6d1e29..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import com.google.common.collect.Lists; -import java.util.Collections; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.yaml.snakeyaml.nodes.Tag; - - -import java.io.File; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; - -import static java.lang.Math.max; -import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE; -import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; - -/** - * This class represents the KeyValueContainer metadata, which is the - * in-memory representation of container metadata and is represented on disk - * by the .container file. - */ -public class KeyValueContainerData extends ContainerData { - - // Yaml Tag used for KeyValueContainerData. - public static final Tag KEYVALUE_YAML_TAG = new Tag("KeyValueContainerData"); - - // Fields need to be stored in .container file. - private static final List KV_YAML_FIELDS; - - // Path to Container metadata Level DB/RocksDB Store and .container file. - private String metadataPath; - - // Path to Physical file system where chunks are stored. - private String chunksPath; - - //Type of DB used to store key to chunks mapping - private String containerDBType; - - private File dbFile = null; - - /** - * Number of pending deletion blocks in KeyValueContainer. - */ - private final AtomicInteger numPendingDeletionBlocks; - - private long deleteTransactionId; - - private long blockCommitSequenceId; - - static { - // Initialize YAML fields - KV_YAML_FIELDS = Lists.newArrayList(); - KV_YAML_FIELDS.addAll(YAML_FIELDS); - KV_YAML_FIELDS.add(METADATA_PATH); - KV_YAML_FIELDS.add(CHUNKS_PATH); - KV_YAML_FIELDS.add(CONTAINER_DB_TYPE); - } - - /** - * Constructs KeyValueContainerData object. - * @param id - ContainerId - * @param size - maximum size of the container in bytes - */ - public KeyValueContainerData(long id, long size, - String originPipelineId, String originNodeId) { - super(ContainerProtos.ContainerType.KeyValueContainer, id, size, - originPipelineId, originNodeId); - this.numPendingDeletionBlocks = new AtomicInteger(0); - this.deleteTransactionId = 0; - } - - /** - * Constructs KeyValueContainerData object. 
- * @param id - ContainerId - * @param layOutVersion - * @param size - maximum size of the container in bytes - */ - public KeyValueContainerData(long id, int layOutVersion, long size, - String originPipelineId, String originNodeId) { - super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion, - size, originPipelineId, originNodeId); - this.numPendingDeletionBlocks = new AtomicInteger(0); - this.deleteTransactionId = 0; - } - - - /** - * Sets Container dbFile. This should be called only during creation of - * KeyValue container. - * @param containerDbFile - */ - public void setDbFile(File containerDbFile) { - dbFile = containerDbFile; - } - - /** - * Returns container DB file. - * @return dbFile - */ - public File getDbFile() { - return dbFile; - } - - /** - * Returns container metadata path. - * @return - Physical path where container file and checksum is stored. - */ - public String getMetadataPath() { - return metadataPath; - } - - /** - * Sets container metadata path. - * - * @param path - String. - */ - public void setMetadataPath(String path) { - this.metadataPath = path; - } - - /** - * Returns the path to base dir of the container. - * @return Path to base dir - */ - public String getContainerPath() { - if (metadataPath == null) { - return null; - } - return new File(metadataPath).getParent(); - } - - /** - * Returns the blockCommitSequenceId. - */ - public long getBlockCommitSequenceId() { - return blockCommitSequenceId; - } - - /** - * updates the blockCommitSequenceId. - */ - public void updateBlockCommitSequenceId(long id) { - this.blockCommitSequenceId = id; - } - - /** - * Get chunks path. - * @return - Path where chunks are stored - */ - public String getChunksPath() { - return chunksPath; - } - - /** - * Set chunks Path. - * @param chunkPath - File path. - */ - public void setChunksPath(String chunkPath) { - this.chunksPath = chunkPath; - } - - /** - * Returns the DBType used for the container. - * @return containerDBType - */ - public String getContainerDBType() { - return containerDBType; - } - - /** - * Sets the DBType used for the container. - * @param containerDBType - */ - public void setContainerDBType(String containerDBType) { - this.containerDBType = containerDBType; - } - - /** - * Increase the count of pending deletion blocks. - * - * @param numBlocks increment number - */ - public void incrPendingDeletionBlocks(int numBlocks) { - this.numPendingDeletionBlocks.addAndGet(numBlocks); - } - - /** - * Decrease the count of pending deletion blocks. - * - * @param numBlocks decrement number - */ - public void decrPendingDeletionBlocks(int numBlocks) { - this.numPendingDeletionBlocks.addAndGet(-1 * numBlocks); - } - - /** - * Get the number of pending deletion blocks. - */ - public int getNumPendingDeletionBlocks() { - return this.numPendingDeletionBlocks.get(); - } - - /** - * Sets deleteTransactionId to latest delete transactionId for the container. - * - * @param transactionId latest transactionId of the container. - */ - public void updateDeleteTransactionId(long transactionId) { - deleteTransactionId = max(transactionId, deleteTransactionId); - } - - /** - * Return the latest deleteTransactionId of the container. - */ - public long getDeleteTransactionId() { - return deleteTransactionId; - } - - /** - * Returns a ProtoBuf Message from ContainerData. 
- * - * @return Protocol Buffer Message - */ - public ContainerDataProto getProtoBufMessage() { - ContainerDataProto.Builder builder = ContainerDataProto.newBuilder(); - builder.setContainerID(this.getContainerID()); - builder.setContainerPath(this.getMetadataPath()); - builder.setState(this.getState()); - - for (Map.Entry entry : getMetadata().entrySet()) { - ContainerProtos.KeyValue.Builder keyValBuilder = - ContainerProtos.KeyValue.newBuilder(); - builder.addMetadata(keyValBuilder.setKey(entry.getKey()) - .setValue(entry.getValue()).build()); - } - - if (this.getBytesUsed() >= 0) { - builder.setBytesUsed(this.getBytesUsed()); - } - - if(this.getContainerType() != null) { - builder.setContainerType(ContainerProtos.ContainerType.KeyValueContainer); - } - - return builder.build(); - } - - public static List getYamlFields() { - return Collections.unmodifiableList(KV_YAML_FIELDS); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java deleted file mode 100644 index bc418839f28bb..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ /dev/null @@ -1,1043 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Function; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto.State; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .PutSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.scm.ByteStringConversion; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis - .DispatcherContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis - .DispatcherContext.WriteChunkStage; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume - .RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.hadoop.ozone.container.keyvalue.helpers.SmallFileUtils; -import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory; -import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; -import org.apache.hadoop.util.AutoCloseableLock; -import org.apache.hadoop.util.ReflectionUtils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import static 
org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_DATANODE_VOLUME_CHOOSING_POLICY; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - Result.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handler for KeyValue Container type. - */ -public class KeyValueHandler extends Handler { - - private static final Logger LOG = LoggerFactory.getLogger( - KeyValueHandler.class); - - private final ContainerType containerType; - private final BlockManager blockManager; - private final ChunkManager chunkManager; - private final VolumeChoosingPolicy volumeChoosingPolicy; - private final long maxContainerSize; - private final Function<ByteBuffer, ByteString> byteBufferToByteString; - - // A lock that is held during container creation. - private final AutoCloseableLock containerCreationLock; - private final boolean doSyncWrite; - - public KeyValueHandler(Configuration config, StateContext context, - ContainerSet contSet, VolumeSet volSet, ContainerMetrics metrics) { - super(config, context, contSet, volSet, metrics); - containerType = ContainerType.KeyValueContainer; - blockManager = new BlockManagerImpl(config); - doSyncWrite = - conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY, - OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); - chunkManager = ChunkManagerFactory.getChunkManager(config, doSyncWrite); - volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass( - HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy - .class, VolumeChoosingPolicy.class), conf); - maxContainerSize = (long)config.getStorageSize( - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); - // this handler lock is used for synchronizing createContainer Requests, - // so using a fair lock here.
- containerCreationLock = new AutoCloseableLock(new ReentrantLock(true)); - byteBufferToByteString = - ByteStringConversion.createByteBufferConversion(conf); - } - - @VisibleForTesting - public VolumeChoosingPolicy getVolumeChoosingPolicyForTesting() { - return volumeChoosingPolicy; - } - - @Override - public void stop() { - } - - @Override - public ContainerCommandResponseProto handle( - ContainerCommandRequestProto request, Container container, - DispatcherContext dispatcherContext) { - - Type cmdType = request.getCmdType(); - KeyValueContainer kvContainer = (KeyValueContainer) container; - switch(cmdType) { - case CreateContainer: - return handleCreateContainer(request, kvContainer); - case ReadContainer: - return handleReadContainer(request, kvContainer); - case UpdateContainer: - return handleUpdateContainer(request, kvContainer); - case DeleteContainer: - return handleDeleteContainer(request, kvContainer); - case ListContainer: - return handleUnsupportedOp(request); - case CloseContainer: - return handleCloseContainer(request, kvContainer); - case PutBlock: - return handlePutBlock(request, kvContainer, dispatcherContext); - case GetBlock: - return handleGetBlock(request, kvContainer); - case DeleteBlock: - return handleDeleteBlock(request, kvContainer); - case ListBlock: - return handleUnsupportedOp(request); - case ReadChunk: - return handleReadChunk(request, kvContainer, dispatcherContext); - case DeleteChunk: - return handleDeleteChunk(request, kvContainer); - case WriteChunk: - return handleWriteChunk(request, kvContainer, dispatcherContext); - case ListChunk: - return handleUnsupportedOp(request); - case CompactChunk: - return handleUnsupportedOp(request); - case PutSmallFile: - return handlePutSmallFile(request, kvContainer, dispatcherContext); - case GetSmallFile: - return handleGetSmallFile(request, kvContainer); - case GetCommittedBlockLength: - return handleGetCommittedBlockLength(request, kvContainer); - default: - return null; - } - } - - @VisibleForTesting - public ChunkManager getChunkManager() { - return this.chunkManager; - } - - @VisibleForTesting - public BlockManager getBlockManager() { - return this.blockManager; - } - - /** - * Handles Create Container Request. If successful, adds the container to - * ContainerSet and sends an ICR to the SCM. - */ - ContainerCommandResponseProto handleCreateContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - if (!request.hasCreateContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Create Container request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - // Create Container request should be passed a null container as the - // container would be created here. - Preconditions.checkArgument(kvContainer == null); - - long containerID = request.getContainerID(); - - KeyValueContainerData newContainerData = new KeyValueContainerData( - containerID, maxContainerSize, request.getPipelineID(), - getDatanodeDetails().getUuidString()); - // TODO: Add support to add metadataList to ContainerData. Add metadata - // to container during creation. 
- KeyValueContainer newContainer = new KeyValueContainer( - newContainerData, conf); - - boolean created = false; - try (AutoCloseableLock l = containerCreationLock.acquire()) { - if (containerSet.getContainer(containerID) == null) { - newContainer.create(volumeSet, volumeChoosingPolicy, scmID); - created = containerSet.addContainer(newContainer); - } else { - // The create container request for an already existing container can - // arrive in case the ContainerStateMachine reapplies the transaction - // on datanode restart. Just log a warning msg here. - LOG.debug("Container already exists." + - "container Id " + containerID); - } - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } - - if (created) { - try { - sendICR(newContainer); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } - } - return ContainerUtils.getSuccessResponse(request); - } - - public void populateContainerPathFields(KeyValueContainer container, - long maxSize) throws IOException { - volumeSet.readLock(); - try { - HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet - .getVolumesList(), maxSize); - String hddsVolumeDir = containerVolume.getHddsRootDir().toString(); - container.populatePathFields(scmID, containerVolume, hddsVolumeDir); - } finally { - volumeSet.readUnlock(); - } - } - - /** - * Handles Read Container Request. Returns the ContainerData as response. - */ - ContainerCommandResponseProto handleReadContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - if (!request.hasReadContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Read Container request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout if that happens. - try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - KeyValueContainerData containerData = kvContainer.getContainerData(); - return KeyValueContainerUtil.getReadContainerResponse( - request, containerData); - } - - - /** - * Handles Update Container Request. If successful, the container metadata - * is updated. - */ - ContainerCommandResponseProto handleUpdateContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasUpdateContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Update Container request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - boolean forceUpdate = request.getUpdateContainer().getForceUpdate(); - List<KeyValue> keyValueList = - request.getUpdateContainer().getMetadataList(); - Map<String, String> metadata = new HashMap<>(); - for (KeyValue keyValue : keyValueList) { - metadata.put(keyValue.getKey(), keyValue.getValue()); - } - - try { - if (!metadata.isEmpty()) { - kvContainer.update(metadata, forceUpdate); - } - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } - return ContainerUtils.getSuccessResponse(request); - } - - /** - * Handles Delete Container Request. - * Open containers cannot be deleted. - * Holds writeLock on ContainerSet till the container is removed from - * containerMap. On disk deletion of container files will happen - * asynchronously without the lock.
- */ - ContainerCommandResponseProto handleDeleteContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasDeleteContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Delete container request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - boolean forceDelete = request.getDeleteContainer().getForceDelete(); - try { - deleteInternal(kvContainer, forceDelete); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } - return ContainerUtils.getSuccessResponse(request); - } - - /** - * Handles Close Container Request. An open container is closed. - * Close Container call is idempotent. - */ - ContainerCommandResponseProto handleCloseContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasCloseContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Update Container request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - try { - markContainerForClose(kvContainer); - closeContainer(kvContainer); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Close Container failed", ex, - IO_EXCEPTION), request); - } - - return ContainerUtils.getSuccessResponse(request); - } - - /** - * Handle Put Block operation. Calls BlockManager to process the request. - */ - ContainerCommandResponseProto handlePutBlock( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, - DispatcherContext dispatcherContext) { - - long blockLength; - if (!request.hasPutBlock()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Put Key request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - BlockData blockData; - try { - checkContainerOpen(kvContainer); - - blockData = BlockData.getFromProtoBuf( - request.getPutBlock().getBlockData()); - Preconditions.checkNotNull(blockData); - long bcsId = - dispatcherContext == null ? 0 : dispatcherContext.getLogIndex(); - blockData.setBlockCommitSequenceId(bcsId); - long numBytes = blockData.getProtoBufMessage().toByteArray().length; - blockManager.putBlock(kvContainer, blockData); - metrics.incContainerBytesStats(Type.PutBlock, numBytes); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Put Key failed", ex, IO_EXCEPTION), - request); - } - - return BlockUtils.putBlockResponseSuccess(request, blockData); - } - - /** - * Handle Get Block operation. Calls BlockManager to process the request. - */ - ContainerCommandResponseProto handleGetBlock( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasGetBlock()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Get Key request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout in that happens. 
- try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - BlockData responseData; - try { - BlockID blockID = BlockID.getFromProtobuf( - request.getGetBlock().getBlockID()); - responseData = blockManager.getBlock(kvContainer, blockID); - long numBytes = responseData.getProtoBufMessage().toByteArray().length; - metrics.incContainerBytesStats(Type.GetBlock, numBytes); - - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Get Key failed", ex, IO_EXCEPTION), - request); - } - - return BlockUtils.getBlockDataResponse(request, responseData); - } - - /** - * Handles GetCommittedBlockLength operation. - * Calls BlockManager to process the request. - */ - ContainerCommandResponseProto handleGetCommittedBlockLength( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - if (!request.hasGetCommittedBlockLength()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Get Key request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout in that happens. - try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - long blockLength; - try { - BlockID blockID = BlockID - .getFromProtobuf(request.getGetCommittedBlockLength().getBlockID()); - blockLength = blockManager.getCommittedBlockLength(kvContainer, blockID); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("GetCommittedBlockLength failed", ex, - IO_EXCEPTION), request); - } - - return BlockUtils.getBlockLengthResponse(request, blockLength); - } - - /** - * Handle Delete Block operation. Calls BlockManager to process the request. - */ - ContainerCommandResponseProto handleDeleteBlock( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasDeleteBlock()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Delete Key request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - try { - checkContainerOpen(kvContainer); - - BlockID blockID = BlockID.getFromProtobuf( - request.getDeleteBlock().getBlockID()); - - blockManager.deleteBlock(kvContainer, blockID); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Delete Key failed", ex, IO_EXCEPTION), - request); - } - - return BlockUtils.getBlockResponseSuccess(request); - } - - /** - * Handle Read Chunk operation. Calls ChunkManager to process the request. - */ - ContainerCommandResponseProto handleReadChunk( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, - DispatcherContext dispatcherContext) { - - if (!request.hasReadChunk()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Read Chunk request. 
trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout if that happens. - try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - ChunkInfo chunkInfo; - ByteBuffer data; - try { - BlockID blockID = BlockID.getFromProtobuf( - request.getReadChunk().getBlockID()); - chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk() - .getChunkData()); - Preconditions.checkNotNull(chunkInfo); - - if (dispatcherContext == null) { - dispatcherContext = new DispatcherContext.Builder().build(); - } - - data = chunkManager - .readChunk(kvContainer, blockID, chunkInfo, dispatcherContext); - metrics.incContainerBytesStats(Type.ReadChunk, chunkInfo.getLen()); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Read Chunk failed", ex, IO_EXCEPTION), - request); - } - - Preconditions.checkNotNull(data, "Chunk data is null"); - - ContainerProtos.ReadChunkResponseProto.Builder response = - ContainerProtos.ReadChunkResponseProto.newBuilder(); - response.setChunkData(chunkInfo.getProtoBufMessage()); - response.setData(byteBufferToByteString.apply(data)); - response.setBlockID(request.getReadChunk().getBlockID()); - - ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(request); - builder.setReadChunk(response); - return builder.build(); - } - - /** - * Throw an exception if the container is unhealthy. - * - * @throws StorageContainerException if the container is unhealthy. - * @param kvContainer - */ - @VisibleForTesting - void checkContainerIsHealthy(KeyValueContainer kvContainer) - throws StorageContainerException { - kvContainer.readLock(); - try { - if (kvContainer.getContainerData().getState() == State.UNHEALTHY) { - throw new StorageContainerException( - "The container replica is unhealthy.", - CONTAINER_UNHEALTHY); - } - } finally { - kvContainer.readUnlock(); - } - } - - /** - * Handle Delete Chunk operation. Calls ChunkManager to process the request. - */ - ContainerCommandResponseProto handleDeleteChunk( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasDeleteChunk()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Delete Chunk request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout in that happens. 
- try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - try { - checkContainerOpen(kvContainer); - - BlockID blockID = BlockID.getFromProtobuf( - request.getDeleteChunk().getBlockID()); - ContainerProtos.ChunkInfo chunkInfoProto = request.getDeleteChunk() - .getChunkData(); - ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto); - Preconditions.checkNotNull(chunkInfo); - - chunkManager.deleteChunk(kvContainer, blockID, chunkInfo); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Delete Chunk failed", ex, - IO_EXCEPTION), request); - } - - return ChunkUtils.getChunkResponseSuccess(request); - } - - /** - * Handle Write Chunk operation. Calls ChunkManager to process the request. - */ - ContainerCommandResponseProto handleWriteChunk( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, - DispatcherContext dispatcherContext) { - - if (!request.hasWriteChunk()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Write Chunk request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - try { - checkContainerOpen(kvContainer); - - BlockID blockID = BlockID.getFromProtobuf( - request.getWriteChunk().getBlockID()); - ContainerProtos.ChunkInfo chunkInfoProto = - request.getWriteChunk().getChunkData(); - ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto); - Preconditions.checkNotNull(chunkInfo); - - ByteBuffer data = null; - if (dispatcherContext == null) { - dispatcherContext = new DispatcherContext.Builder().build(); - } - WriteChunkStage stage = dispatcherContext.getStage(); - if (stage == WriteChunkStage.WRITE_DATA || - stage == WriteChunkStage.COMBINED) { - data = request.getWriteChunk().getData().asReadOnlyByteBuffer(); - } - - chunkManager - .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); - - // We should increment stats after writeChunk - if (stage == WriteChunkStage.WRITE_DATA|| - stage == WriteChunkStage.COMBINED) { - metrics.incContainerBytesStats(Type.WriteChunk, request.getWriteChunk() - .getChunkData().getLen()); - } - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Write Chunk failed", ex, IO_EXCEPTION), - request); - } - - return ChunkUtils.getChunkResponseSuccess(request); - } - - /** - * Handle Put Small File operation. Writes the chunk and associated key - * using a single RPC. Calls BlockManager and ChunkManager to process the - * request. - */ - ContainerCommandResponseProto handlePutSmallFile( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, - DispatcherContext dispatcherContext) { - - if (!request.hasPutSmallFile()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Put Small File request. 
trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - PutSmallFileRequestProto putSmallFileReq = - request.getPutSmallFile(); - BlockData blockData; - - try { - checkContainerOpen(kvContainer); - - BlockID blockID = BlockID.getFromProtobuf(putSmallFileReq.getBlock() - .getBlockData().getBlockID()); - blockData = BlockData.getFromProtoBuf( - putSmallFileReq.getBlock().getBlockData()); - Preconditions.checkNotNull(blockData); - - ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf( - putSmallFileReq.getChunkInfo()); - Preconditions.checkNotNull(chunkInfo); - ByteBuffer data = putSmallFileReq.getData().asReadOnlyByteBuffer(); - if (dispatcherContext == null) { - dispatcherContext = new DispatcherContext.Builder().build(); - } - - // chunks will be committed as a part of handling putSmallFile - // here. There is no need to maintain this info in openContainerBlockMap. - chunkManager - .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); - - List chunks = new LinkedList<>(); - chunks.add(chunkInfo.getProtoBufMessage()); - blockData.setChunks(chunks); - blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex()); - - blockManager.putBlock(kvContainer, blockData); - metrics.incContainerBytesStats(Type.PutSmallFile, data.capacity()); - - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Read Chunk failed", ex, - PUT_SMALL_FILE_ERROR), request); - } - - return SmallFileUtils.getPutFileResponseSuccess(request, blockData); - } - - /** - * Handle Get Small File operation. Gets a data stream using a key. This - * helps in reducing the RPC overhead for small files. Calls BlockManager and - * ChunkManager to process the request. - */ - ContainerCommandResponseProto handleGetSmallFile( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasGetSmallFile()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Get Small File request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout in that happens. - try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile(); - - try { - BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock() - .getBlockID()); - BlockData responseData = blockManager.getBlock(kvContainer, blockID); - - ContainerProtos.ChunkInfo chunkInfo = null; - ByteString dataBuf = ByteString.EMPTY; - DispatcherContext dispatcherContext = - new DispatcherContext.Builder().build(); - for (ContainerProtos.ChunkInfo chunk : responseData.getChunks()) { - // if the block is committed, all chunks must have been committed. - // Tmp chunk files won't exist here. 
- ByteBuffer data = chunkManager.readChunk(kvContainer, blockID, - ChunkInfo.getFromProtoBuf(chunk), dispatcherContext); - ByteString current = byteBufferToByteString.apply(data); - dataBuf = dataBuf.concat(current); - chunkInfo = chunk; - } - metrics.incContainerBytesStats(Type.GetSmallFile, dataBuf.size()); - return SmallFileUtils.getGetSmallFileResponseSuccess(request, dataBuf - .toByteArray(), ChunkInfo.getFromProtoBuf(chunkInfo)); - } catch (StorageContainerException e) { - return ContainerUtils.logAndReturnError(LOG, e, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Write Chunk failed", ex, - GET_SMALL_FILE_ERROR), request); - } - } - - /** - * Handle unsupported operation. - */ - ContainerCommandResponseProto handleUnsupportedOp( - ContainerCommandRequestProto request) { - // TODO : remove all unsupported operations or handle them. - return ContainerUtils.unsupportedRequest(request); - } - - /** - * Check if container is open. Throw exception otherwise. - * @param kvContainer - * @throws StorageContainerException - */ - private void checkContainerOpen(KeyValueContainer kvContainer) - throws StorageContainerException { - - final State containerState = kvContainer.getContainerState(); - - /* - * In a closing state, follower will receive transactions from leader. - * Once the leader is put to closing state, it will reject further requests - * from clients. Only the transactions which happened before the container - * in the leader goes to closing state, will arrive here even the container - * might already be in closing state here. - */ - if (containerState == State.OPEN || containerState == State.CLOSING) { - return; - } - - final ContainerProtos.Result result; - switch (containerState) { - case QUASI_CLOSED: - result = CLOSED_CONTAINER_IO; - break; - case CLOSED: - result = CLOSED_CONTAINER_IO; - break; - case UNHEALTHY: - result = CONTAINER_UNHEALTHY; - break; - case INVALID: - result = INVALID_CONTAINER_STATE; - break; - default: - result = CONTAINER_INTERNAL_ERROR; - } - String msg = "Requested operation not allowed as ContainerState is " + - containerState; - throw new StorageContainerException(msg, result); - } - - @Override - public Container importContainer(final long containerID, - final long maxSize, final String originPipelineId, - final String originNodeId, final InputStream rawContainerStream, - final TarContainerPacker packer) - throws IOException { - - // TODO: Add layout version! 
- KeyValueContainerData containerData = - new KeyValueContainerData(containerID, - maxSize, originPipelineId, originNodeId); - - KeyValueContainer container = new KeyValueContainer(containerData, - conf); - - populateContainerPathFields(container, maxSize); - container.importContainerData(rawContainerStream, packer); - sendICR(container); - return container; - - } - - @Override - public void exportContainer(final Container container, - final OutputStream outputStream, - final TarContainerPacker packer) - throws IOException{ - container.readLock(); - try { - final KeyValueContainer kvc = (KeyValueContainer) container; - kvc.exportContainerData(outputStream, packer); - } finally { - container.readUnlock(); - } - } - - @Override - public void markContainerForClose(Container container) - throws IOException { - container.writeLock(); - try { - // Move the container to CLOSING state only if it's OPEN - if (container.getContainerState() == State.OPEN) { - container.markContainerForClose(); - sendICR(container); - } - } finally { - container.writeUnlock(); - } - } - - @Override - public void markContainerUnhealthy(Container container) - throws IOException { - container.writeLock(); - try { - if (container.getContainerState() != State.UNHEALTHY) { - try { - container.markContainerUnhealthy(); - } catch (IOException ex) { - // explicitly catch IOException here since the this operation - // will fail if the Rocksdb metadata is corrupted. - long id = container.getContainerData().getContainerID(); - LOG.warn("Unexpected error while marking container " + id - + " as unhealthy", ex); - } finally { - sendICR(container); - } - } - } finally { - container.writeUnlock(); - } - } - - @Override - public void quasiCloseContainer(Container container) - throws IOException { - container.writeLock(); - try { - final State state = container.getContainerState(); - // Quasi close call is idempotent. - if (state == State.QUASI_CLOSED) { - return; - } - // The container has to be in CLOSING state. - if (state != State.CLOSING) { - ContainerProtos.Result error = - state == State.INVALID ? INVALID_CONTAINER_STATE : - CONTAINER_INTERNAL_ERROR; - throw new StorageContainerException( - "Cannot quasi close container #" + container.getContainerData() - .getContainerID() + " while in " + state + " state.", error); - } - container.quasiClose(); - sendICR(container); - } finally { - container.writeUnlock(); - } - } - - @Override - public void closeContainer(Container container) - throws IOException { - container.writeLock(); - try { - final State state = container.getContainerState(); - // Close call is idempotent. - if (state == State.CLOSED) { - return; - } - if (state == State.UNHEALTHY) { - throw new StorageContainerException( - "Cannot close container #" + container.getContainerData() - .getContainerID() + " while in " + state + " state.", - ContainerProtos.Result.CONTAINER_UNHEALTHY); - } - // The container has to be either in CLOSING or in QUASI_CLOSED state. - if (state != State.CLOSING && state != State.QUASI_CLOSED) { - ContainerProtos.Result error = - state == State.INVALID ? 
INVALID_CONTAINER_STATE : - CONTAINER_INTERNAL_ERROR; - throw new StorageContainerException( - "Cannot close container #" + container.getContainerData() - .getContainerID() + " while in " + state + " state.", error); - } - container.close(); - sendICR(container); - } finally { - container.writeUnlock(); - } - } - - @Override - public void deleteContainer(Container container, boolean force) - throws IOException { - deleteInternal(container, force); - } - - private void deleteInternal(Container container, boolean force) - throws StorageContainerException { - container.writeLock(); - try { - // If force is false, we check container state. - if (!force) { - // Check if container is open - if (container.getContainerData().isOpen()) { - throw new StorageContainerException( - "Deletion of Open Container is not allowed.", - DELETE_ON_OPEN_CONTAINER); - } - } - long containerId = container.getContainerData().getContainerID(); - containerSet.removeContainer(containerId); - } finally { - container.writeUnlock(); - } - // Avoid holding write locks for disk operations - container.delete(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java deleted file mode 100644 index 13689a705cea0..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java +++ /dev/null @@ -1,249 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.container.keyvalue; - -import java.io.BufferedOutputStream; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.stream.Collectors; - -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker; - -import com.google.common.base.Preconditions; -import org.apache.commons.compress.archivers.ArchiveEntry; -import org.apache.commons.compress.archivers.ArchiveOutputStream; -import org.apache.commons.compress.archivers.tar.TarArchiveEntry; -import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; -import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; -import org.apache.commons.compress.compressors.CompressorException; -import org.apache.commons.compress.compressors.CompressorInputStream; -import org.apache.commons.compress.compressors.CompressorOutputStream; -import org.apache.commons.compress.compressors.CompressorStreamFactory; -import org.apache.commons.io.IOUtils; - -/** - * Compress/uncompress KeyValueContainer data to a tar.gz archive. - */ -public class TarContainerPacker - implements ContainerPacker<KeyValueContainerData> { - - private static final String CHUNKS_DIR_NAME = OzoneConsts.STORAGE_DIR_CHUNKS; - - private static final String DB_DIR_NAME = "db"; - - private static final String CONTAINER_FILE_NAME = "container.yaml"; - - - - /** - * Given an input stream (tar file) extract the data to the specified - * directories. - * - * @param container container which defines the destination structure. - * @param inputStream the input stream. - * @throws IOException - */ - @Override - public byte[] unpackContainerData(Container container, - InputStream inputStream) - throws IOException { - byte[] descriptorFileContent = null; - try { - KeyValueContainerData containerData = container.getContainerData(); - CompressorInputStream compressorInputStream = - new CompressorStreamFactory() - .createCompressorInputStream(CompressorStreamFactory.GZIP, - inputStream); - - TarArchiveInputStream tarInput = - new TarArchiveInputStream(compressorInputStream); - - TarArchiveEntry entry = tarInput.getNextTarEntry(); - while (entry != null) { - String name = entry.getName(); - if (name.startsWith(DB_DIR_NAME + "/")) { - Path destinationPath = containerData.getDbFile().toPath() - .resolve(name.substring(DB_DIR_NAME.length() + 1)); - extractEntry(tarInput, entry.getSize(), destinationPath); - } else if (name.startsWith(CHUNKS_DIR_NAME + "/")) { - Path destinationPath = Paths.get(containerData.getChunksPath()) - .resolve(name.substring(CHUNKS_DIR_NAME.length() + 1)); - extractEntry(tarInput, entry.getSize(), destinationPath); - } else if (name.equals(CONTAINER_FILE_NAME)) { - //Don't do anything. Container file should be unpacked in a - //separate step by unpackContainerDescriptor call.
- descriptorFileContent = readEntry(tarInput, entry); - } else { - throw new IllegalArgumentException( - "Unknown entry in the tar file: " + "" + name); - } - entry = tarInput.getNextTarEntry(); - } - return descriptorFileContent; - - } catch (CompressorException e) { - throw new IOException( - "Can't uncompress the given container: " + container - .getContainerData().getContainerID(), - e); - } - } - - private void extractEntry(TarArchiveInputStream tarInput, long size, - Path path) throws IOException { - Preconditions.checkNotNull(path, "Path element should not be null"); - Path parent = Preconditions.checkNotNull(path.getParent(), - "Path element should have a parent directory"); - Files.createDirectories(parent); - try (BufferedOutputStream bos = new BufferedOutputStream( - new FileOutputStream(path.toAbsolutePath().toString()))) { - int bufferSize = 1024; - byte[] buffer = new byte[bufferSize + 1]; - long remaining = size; - while (remaining > 0) { - int read = - tarInput.read(buffer, 0, (int) Math.min(remaining, bufferSize)); - if (read >= 0) { - remaining -= read; - bos.write(buffer, 0, read); - } else { - remaining = 0; - } - } - } - - } - - /** - * Given a containerData include all the required container data/metadata - * in a tar file. - * - * @param container Container to archive (data + metadata). - * @param destination Destination tar file/stream. - * @throws IOException - */ - @Override - public void pack(Container container, - OutputStream destination) - throws IOException { - - KeyValueContainerData containerData = container.getContainerData(); - - try (CompressorOutputStream gzippedOut = new CompressorStreamFactory() - .createCompressorOutputStream(CompressorStreamFactory.GZIP, - destination)) { - - try (ArchiveOutputStream archiveOutputStream = new TarArchiveOutputStream( - gzippedOut)) { - - includePath(containerData.getDbFile().toString(), DB_DIR_NAME, - archiveOutputStream); - - includePath(containerData.getChunksPath(), CHUNKS_DIR_NAME, - archiveOutputStream); - - includeFile(container.getContainerFile(), - CONTAINER_FILE_NAME, - archiveOutputStream); - } - } catch (CompressorException e) { - throw new IOException( - "Can't compress the container: " + containerData.getContainerID(), - e); - } - - } - - @Override - public byte[] unpackContainerDescriptor(InputStream inputStream) - throws IOException { - try { - CompressorInputStream compressorInputStream = - new CompressorStreamFactory() - .createCompressorInputStream(CompressorStreamFactory.GZIP, - inputStream); - - TarArchiveInputStream tarInput = - new TarArchiveInputStream(compressorInputStream); - - TarArchiveEntry entry = tarInput.getNextTarEntry(); - while (entry != null) { - String name = entry.getName(); - if (name.equals(CONTAINER_FILE_NAME)) { - return readEntry(tarInput, entry); - } - entry = tarInput.getNextTarEntry(); - } - - } catch (CompressorException e) { - throw new IOException( - "Can't read the container descriptor from the container archive", - e); - } - throw new IOException( - "Container descriptor is missing from the container archive."); - } - - private byte[] readEntry(TarArchiveInputStream tarInput, - TarArchiveEntry entry) throws IOException { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int bufferSize = 1024; - byte[] buffer = new byte[bufferSize + 1]; - long remaining = entry.getSize(); - while (remaining > 0) { - int read = - tarInput.read(buffer, 0, (int) Math.min(remaining, bufferSize)); - remaining -= read; - bos.write(buffer, 0, read); - } - return bos.toByteArray(); 
- } - - private void includePath(String containerPath, String subdir, - ArchiveOutputStream archiveOutputStream) throws IOException { - - for (Path path : Files.list(Paths.get(containerPath)) - .collect(Collectors.toList())) { - - includeFile(path.toFile(), subdir + "/" + path.getFileName(), - archiveOutputStream); - } - } - - private void includeFile(File file, String entryName, - ArchiveOutputStream archiveOutputStream) throws IOException { - ArchiveEntry archiveEntry = - archiveOutputStream.createArchiveEntry(file, entryName); - archiveOutputStream.putArchiveEntry(archiveEntry); - try (FileInputStream fis = new FileInputStream(file)) { - IOUtils.copy(fis, archiveOutputStream); - } - archiveOutputStream.closeArchiveEntry(); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java deleted file mode 100644 index da7c8579d887b..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetBlockResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - GetCommittedBlockLengthResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - PutBlockResponseProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; - -import java.io.IOException; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.NO_SUCH_BLOCK; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.UNABLE_TO_READ_METADATA_DB; - -/** - * Utils functions to help block functions. 
- */ -public final class BlockUtils { - - /** Never constructed. **/ - private BlockUtils() { - - } - /** - * Get a DB handler for a given container. - * If the handler doesn't exist in cache yet, first create one and - * add into cache. This function is called with containerManager - * ReadLock held. - * - * @param containerData containerData. - * @param conf configuration. - * @return MetadataStore handle. - * @throws StorageContainerException - */ - public static ReferenceCountedDB getDB(KeyValueContainerData containerData, - Configuration conf) throws - StorageContainerException { - Preconditions.checkNotNull(containerData); - ContainerCache cache = ContainerCache.getInstance(conf); - Preconditions.checkNotNull(cache); - Preconditions.checkNotNull(containerData.getDbFile()); - try { - return cache.getDB(containerData.getContainerID(), containerData - .getContainerDBType(), containerData.getDbFile().getAbsolutePath(), - conf); - } catch (IOException ex) { - String message = String.format("Error opening DB. Container:%s " + - "ContainerPath:%s", containerData.getContainerID(), containerData - .getDbFile().getPath()); - throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB); - } - } - /** - * Remove a DB handler from cache. - * - * @param container - Container data. - * @param conf - Configuration. - */ - public static void removeDB(KeyValueContainerData container, Configuration - conf) { - Preconditions.checkNotNull(container); - ContainerCache cache = ContainerCache.getInstance(conf); - Preconditions.checkNotNull(cache); - cache.removeDB(container.getDbFile().getAbsolutePath()); - } - - /** - * Shutdown all DB Handles. - * - * @param cache - Cache for DB Handles. - */ - @SuppressWarnings("unchecked") - public static void shutdownCache(ContainerCache cache) { - cache.shutdownCache(); - } - - /** - * Parses the {@link BlockData} from a bytes array. - * - * @param bytes Block data in bytes. - * @return Block data. - * @throws IOException if the bytes array is malformed or invalid. - */ - public static BlockData getBlockData(byte[] bytes) throws IOException { - try { - ContainerProtos.BlockData blockData = ContainerProtos.BlockData.parseFrom( - bytes); - BlockData data = BlockData.getFromProtoBuf(blockData); - return data; - } catch (IOException e) { - throw new StorageContainerException("Failed to parse block data from " + - "the bytes array.", NO_SUCH_BLOCK); - } - } - - /** - * Returns putBlock response success. - * @param msg - Request. - * @return Response. - */ - public static ContainerCommandResponseProto putBlockResponseSuccess( - ContainerCommandRequestProto msg, BlockData blockData) { - ContainerProtos.BlockData blockDataProto = blockData.getProtoBufMessage(); - GetCommittedBlockLengthResponseProto.Builder - committedBlockLengthResponseBuilder = - getCommittedBlockLengthResponseBuilder(blockData.getSize(), - blockDataProto.getBlockID()); - PutBlockResponseProto.Builder putKeyResponse = - PutBlockResponseProto.newBuilder(); - putKeyResponse - .setCommittedBlockLength(committedBlockLengthResponseBuilder); - ContainerProtos.ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setPutBlock(putKeyResponse); - return builder.build(); - } - /** - * Returns successful blockResponse. - * @param msg - Request. - * @return Response. 
- */ - public static ContainerCommandResponseProto getBlockResponseSuccess( - ContainerCommandRequestProto msg) { - return ContainerUtils.getSuccessResponse(msg); - } - - - public static ContainerCommandResponseProto getBlockDataResponse( - ContainerCommandRequestProto msg, BlockData data) { - GetBlockResponseProto.Builder getBlock = ContainerProtos - .GetBlockResponseProto - .newBuilder(); - getBlock.setBlockData(data.getProtoBufMessage()); - ContainerProtos.ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setGetBlock(getBlock); - return builder.build(); - } - - /** - * Returns successful getCommittedBlockLength Response. - * @param msg - Request. - * @return Response. - */ - public static ContainerCommandResponseProto getBlockLengthResponse( - ContainerCommandRequestProto msg, long blockLength) { - GetCommittedBlockLengthResponseProto.Builder - committedBlockLengthResponseBuilder = - getCommittedBlockLengthResponseBuilder(blockLength, - msg.getGetCommittedBlockLength().getBlockID()); - ContainerProtos.ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setGetCommittedBlockLength(committedBlockLengthResponseBuilder); - return builder.build(); - } - - public static GetCommittedBlockLengthResponseProto.Builder - getCommittedBlockLengthResponseBuilder(long blockLength, - ContainerProtos.DatanodeBlockID blockID) { - ContainerProtos.GetCommittedBlockLengthResponseProto.Builder - getCommittedBlockLengthResponseBuilder = ContainerProtos. - GetCommittedBlockLengthResponseProto.newBuilder(); - getCommittedBlockLengthResponseBuilder.setBlockLength(blockLength); - getCommittedBlockLengthResponseBuilder.setBlockID(blockID); - return getCommittedBlockLengthResponseBuilder; - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java deleted file mode 100644 index 8ca59b5914644..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java +++ /dev/null @@ -1,319 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl; -import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; -import org.apache.hadoop.util.Time; -import org.apache.ratis.util.function.CheckedSupplier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import java.security.NoSuchAlgorithmException; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutionException; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*; - -/** - * Utility methods for chunk operations for KeyValue container. - */ -public final class ChunkUtils { - - private static final Set LOCKS = ConcurrentHashMap.newKeySet(); - - /** Never constructed. **/ - private ChunkUtils() { - - } - - /** - * Writes the data in chunk Info to the specified location in the chunkfile. - * - * @param chunkFile - File to write data to. - * @param chunkInfo - Data stream to write. - * @param data - The data buffer. - * @param volumeIOStats statistics collector - * @param sync whether to do fsync or not - */ - public static void writeData(File chunkFile, ChunkInfo chunkInfo, - ByteBuffer data, VolumeIOStats volumeIOStats, boolean sync) - throws StorageContainerException, ExecutionException, - InterruptedException, NoSuchAlgorithmException { - final int bufferSize = data.remaining(); - Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - if (bufferSize != chunkInfo.getLen()) { - String err = String.format("data array does not match the length " + - "specified. DataLen: %d Byte Array: %d", - chunkInfo.getLen(), bufferSize); - log.error(err); - throw new StorageContainerException(err, INVALID_WRITE_SIZE); - } - - Path path = chunkFile.toPath(); - long startTime = Time.monotonicNow(); - processFileExclusively(path, () -> { - FileChannel file = null; - try { - // skip SYNC and DSYNC to reduce contention on file.lock - file = FileChannel.open(path, - StandardOpenOption.CREATE, - StandardOpenOption.WRITE, - StandardOpenOption.SPARSE); - - int size; - try (FileLock ignored = file.lock()) { - size = file.write(data, chunkInfo.getOffset()); - } - - // Increment volumeIO stats here. - volumeIOStats.incWriteTime(Time.monotonicNow() - startTime); - volumeIOStats.incWriteOpCount(); - volumeIOStats.incWriteBytes(size); - if (size != bufferSize) { - log.error("Invalid write size found. Size:{} Expected: {} ", size, - bufferSize); - throw new StorageContainerException("Invalid write size found. 
" + - "Size: " + size + " Expected: " + bufferSize, INVALID_WRITE_SIZE); - } - } catch (StorageContainerException ex) { - throw ex; - } catch (IOException e) { - throw new StorageContainerException(e, IO_EXCEPTION); - } finally { - closeFile(file, sync); - } - - return null; - }); - - if (log.isDebugEnabled()) { - log.debug("Write Chunk completed for chunkFile: {}, size {}", chunkFile, - bufferSize); - } - } - - /** - * Reads data from an existing chunk file. - * - * @param chunkFile - file where data lives. - * @param data - chunk definition. - * @param volumeIOStats statistics collector - * @return ByteBuffer - */ - public static ByteBuffer readData(File chunkFile, ChunkInfo data, - VolumeIOStats volumeIOStats) throws StorageContainerException { - Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - - if (!chunkFile.exists()) { - log.error("Unable to find the chunk file. chunk info : {}", - data.toString()); - throw new StorageContainerException("Unable to find the chunk file. " + - "chunk info " + - data.toString(), UNABLE_TO_FIND_CHUNK); - } - - long offset = data.getOffset(); - long len = data.getLen(); - ByteBuffer buf = ByteBuffer.allocate((int) len); - - Path path = chunkFile.toPath(); - long startTime = Time.monotonicNow(); - return processFileExclusively(path, () -> { - FileChannel file = null; - - try { - file = FileChannel.open(path, StandardOpenOption.READ); - - try (FileLock ignored = file.lock(offset, len, true)) { - file.read(buf, offset); - buf.flip(); - } - - // Increment volumeIO stats here. - volumeIOStats.incReadTime(Time.monotonicNow() - startTime); - volumeIOStats.incReadOpCount(); - volumeIOStats.incReadBytes(len); - - return buf; - } catch (IOException e) { - throw new StorageContainerException(e, IO_EXCEPTION); - } finally { - if (file != null) { - IOUtils.closeStream(file); - } - } - }); - } - - /** - * Validates chunk data and returns a file object to Chunk File that we are - * expected to write data to. - * - * @param chunkFile - chunkFile to write data into. - * @param info - chunk info. - * @return true if the chunkFile exists and chunkOffset < chunkFile length, - * false otherwise. - */ - public static boolean validateChunkForOverwrite(File chunkFile, - ChunkInfo info) { - - Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - - if (isOverWriteRequested(chunkFile, info)) { - if (!isOverWritePermitted(info)) { - log.warn("Duplicate write chunk request. Chunk overwrite " + - "without explicit request. {}", info.toString()); - } - return true; - } - return false; - } - - /** - * Validates that Path to chunk file exists. - * - * @param containerData - Container Data - * @param info - Chunk info - * @return - File. 
- * @throws StorageContainerException - */ - public static File getChunkFile(KeyValueContainerData containerData, - ChunkInfo info) throws - StorageContainerException { - - Preconditions.checkNotNull(containerData, "Container data can't be null"); - Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - - String chunksPath = containerData.getChunksPath(); - if (chunksPath == null) { - log.error("Chunks path is null in the container data"); - throw new StorageContainerException("Unable to get Chunks directory.", - UNABLE_TO_FIND_DATA_DIR); - } - File chunksLoc = new File(chunksPath); - if (!chunksLoc.exists()) { - log.error("Chunks path does not exist"); - throw new StorageContainerException("Unable to get Chunks directory.", - UNABLE_TO_FIND_DATA_DIR); - } - - return chunksLoc.toPath().resolve(info.getChunkName()).toFile(); - } - - /** - * Checks if we are getting a request to overwrite an existing range of - * chunk. - * - * @param chunkFile - File - * @param chunkInfo - Buffer to write - * @return bool - */ - public static boolean isOverWriteRequested(File chunkFile, ChunkInfo - chunkInfo) { - - if (!chunkFile.exists()) { - return false; - } - - long offset = chunkInfo.getOffset(); - return offset < chunkFile.length(); - } - - /** - * Overwrite is permitted if an only if the user explicitly asks for it. We - * permit this iff the key/value pair contains a flag called - * [OverWriteRequested, true]. - * - * @param chunkInfo - Chunk info - * @return true if the user asks for it. - */ - public static boolean isOverWritePermitted(ChunkInfo chunkInfo) { - String overWrite = chunkInfo.getMetadata().get(OzoneConsts.CHUNK_OVERWRITE); - return (overWrite != null) && - (!overWrite.isEmpty()) && - (Boolean.valueOf(overWrite)); - } - - /** - * Returns a CreateContainer Response. This call is used by create and delete - * containers which have null success responses. - * - * @param msg Request - * @return Response. - */ - public static ContainerCommandResponseProto getChunkResponseSuccess( - ContainerCommandRequestProto msg) { - return ContainerUtils.getSuccessResponse(msg); - } - - @VisibleForTesting - static T processFileExclusively( - Path path, CheckedSupplier op - ) throws E { - for (;;) { - if (LOCKS.add(path)) { - break; - } - } - - try { - return op.get(); - } finally { - LOCKS.remove(path); - } - } - - private static void closeFile(FileChannel file, boolean sync) - throws StorageContainerException { - if (file != null) { - try { - if (sync) { - // ensure data and metadata is persisted - file.force(true); - } - file.close(); - } catch (IOException e) { - throw new StorageContainerException("Error closing chunk file", - e, CONTAINER_INTERNAL_ERROR); - } - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java deleted file mode 100644 index 0c7a04e51da3b..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Storage; - -import java.io.File; - -/** - * Class which provides utility methods for container locations. - */ -public final class KeyValueContainerLocationUtil { - - /* Never constructed. */ - private KeyValueContainerLocationUtil() { - - } - /** - * Returns Container Metadata Location. - * @param hddsVolumeDir base dir of the hdds volume where scm directories - * are stored - * @param scmId - * @param containerId - * @return containerMetadata Path to container metadata location where - * .container file will be stored. - */ - public static File getContainerMetaDataPath(String hddsVolumeDir, - String scmId, - long containerId) { - String containerMetaDataPath = - getBaseContainerLocation(hddsVolumeDir, scmId, - containerId); - containerMetaDataPath = containerMetaDataPath + File.separator + - OzoneConsts.CONTAINER_META_PATH; - return new File(containerMetaDataPath); - } - - - /** - * Returns Container Chunks Location. - * @param baseDir - * @param scmId - * @param containerId - * @return chunksPath - */ - public static File getChunksLocationPath(String baseDir, String scmId, - long containerId) { - String chunksPath = getBaseContainerLocation(baseDir, scmId, containerId) - + File.separator + OzoneConsts.STORAGE_DIR_CHUNKS; - return new File(chunksPath); - } - - /** - * Returns base directory for specified container. - * @param hddsVolumeDir - * @param scmId - * @param containerId - * @return base directory for container. - */ - private static String getBaseContainerLocation(String hddsVolumeDir, - String scmId, - long containerId) { - Preconditions.checkNotNull(hddsVolumeDir, "Base Directory cannot be null"); - Preconditions.checkNotNull(scmId, "scmUuid cannot be null"); - Preconditions.checkState(containerId >= 0, - "Container Id cannot be negative."); - - String containerSubDirectory = getContainerSubDirectory(containerId); - - String containerMetaDataPath = hddsVolumeDir + File.separator + scmId + - File.separator + Storage.STORAGE_DIR_CURRENT + File.separator + - containerSubDirectory + File.separator + containerId; - - return containerMetaDataPath; - } - - /** - * Returns subdirectory, where this container needs to be placed. - * @param containerId - * @return container sub directory - */ - private static String getContainerSubDirectory(long containerId){ - int directory = (int) ((containerId >> 9) & 0xFF); - return Storage.CONTAINER_DIR + directory; - } - - /** - * Return containerDB File. 
- */ - public static File getContainerDBFile(File containerMetaDataPath, - long containerID) { - return new File(containerMetaDataPath, containerID + OzoneConsts - .DN_CONTAINER_DB); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java deleted file mode 100644 index 3733b06b73549..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.List; -import java.util.Map; - -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; - -import com.google.common.base.Preconditions; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class which defines utility methods for KeyValueContainer. - */ - -public final class KeyValueContainerUtil { - - /* Never constructed. */ - private KeyValueContainerUtil() { - - } - - private static final Logger LOG = LoggerFactory.getLogger( - KeyValueContainerUtil.class); - - /** - * creates metadata path, chunks path and metadata DB for the specified - * container. 
- * - * @param containerMetaDataPath - * @throws IOException - */ - public static void createContainerMetaData(File containerMetaDataPath, File - chunksPath, File dbFile, Configuration conf) throws IOException { - Preconditions.checkNotNull(containerMetaDataPath); - Preconditions.checkNotNull(conf); - - if (!containerMetaDataPath.mkdirs()) { - LOG.error("Unable to create directory for metadata storage. Path: {}", - containerMetaDataPath); - throw new IOException("Unable to create directory for metadata storage." + - " Path: " + containerMetaDataPath); - } - MetadataStore store = MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(dbFile).build(); - - // we close since the SCM pre-creates containers. - // we will open and put Db handle into a cache when keys are being created - // in a container. - - store.close(); - - if (!chunksPath.mkdirs()) { - LOG.error("Unable to create chunks directory Container {}", - chunksPath); - //clean up container metadata path and metadata db - FileUtils.deleteDirectory(containerMetaDataPath); - FileUtils.deleteDirectory(containerMetaDataPath.getParentFile()); - throw new IOException("Unable to create directory for data storage." + - " Path: " + chunksPath); - } - } - - /** - * remove Container if it is empty. - *
- * There are three things we need to delete.
- *
- * 1. Container file and metadata file. 2. The Level DB file 3. The path that - * we created on the data location. - * - * @param containerData - Data of the container to remove. - * @param conf - configuration of the cluster. - * @throws IOException - */ - public static void removeContainer(KeyValueContainerData containerData, - Configuration conf) - throws IOException { - Preconditions.checkNotNull(containerData); - File containerMetaDataPath = new File(containerData - .getMetadataPath()); - File chunksPath = new File(containerData.getChunksPath()); - - // Close the DB connection and remove the DB handler from cache - BlockUtils.removeDB(containerData, conf); - - // Delete the Container MetaData path. - FileUtils.deleteDirectory(containerMetaDataPath); - - //Delete the Container Chunks Path. - FileUtils.deleteDirectory(chunksPath); - - //Delete Container directory - FileUtils.deleteDirectory(containerMetaDataPath.getParentFile()); - } - - /** - * Returns a ReadContainer Response. - * - * @param request Request - * @param containerData - data - * @return Response. - */ - public static ContainerCommandResponseProto getReadContainerResponse( - ContainerCommandRequestProto request, - KeyValueContainerData containerData) { - Preconditions.checkNotNull(containerData); - - ContainerProtos.ReadContainerResponseProto.Builder response = - ContainerProtos.ReadContainerResponseProto.newBuilder(); - response.setContainerData(containerData.getProtoBufMessage()); - - ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(request); - builder.setReadContainer(response); - return builder.build(); - } - - /** - * Parse KeyValueContainerData and verify checksum. - * @param kvContainerData - * @param config - * @throws IOException - */ - public static void parseKVContainerData(KeyValueContainerData kvContainerData, - Configuration config) throws IOException { - - long containerID = kvContainerData.getContainerID(); - File metadataPath = new File(kvContainerData.getMetadataPath()); - - // Verify Checksum - ContainerUtils.verifyChecksum(kvContainerData); - - File dbFile = KeyValueContainerLocationUtil.getContainerDBFile( - metadataPath, containerID); - if (!dbFile.exists()) { - LOG.error("Container DB file is missing for ContainerID {}. " + - "Skipping loading of this container.", containerID); - // Don't further process this container, as it is missing db file. - return; - } - kvContainerData.setDbFile(dbFile); - - try(ReferenceCountedDB metadata = - BlockUtils.getDB(kvContainerData, config)) { - long bytesUsed = 0; - List> liveKeys = metadata.getStore() - .getRangeKVs(null, Integer.MAX_VALUE, - MetadataKeyFilters.getNormalKeyFilter()); - - bytesUsed = liveKeys.parallelStream().mapToLong(e-> { - BlockData blockData; - try { - blockData = BlockUtils.getBlockData(e.getValue()); - return blockData.getSize(); - } catch (IOException ex) { - return 0L; - } - }).sum(); - kvContainerData.setBytesUsed(bytesUsed); - kvContainerData.setKeyCount(liveKeys.size()); - byte[] bcsId = metadata.getStore().get(DFSUtil.string2Bytes( - OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX)); - if (bcsId != null) { - kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId)); - } - } - } - - /** - * Returns the path where data or chunks live for a given container. 
- * - * @param kvContainerData - KeyValueContainerData - * @return - Path to the chunks directory - */ - public static Path getDataDirectory(KeyValueContainerData kvContainerData) { - - String chunksPath = kvContainerData.getChunksPath(); - Preconditions.checkNotNull(chunksPath); - - return Paths.get(chunksPath); - } - - /** - * Container metadata directory -- here is where the level DB and - * .container file lives. - * - * @param kvContainerData - KeyValueContainerData - * @return Path to the metadata directory - */ - public static Path getMetadataDirectory( - KeyValueContainerData kvContainerData) { - - String metadataPath = kvContainerData.getMetadataPath(); - Preconditions.checkNotNull(metadataPath); - - return Paths.get(metadataPath); - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java deleted file mode 100644 index ba2b02c88b648..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; - -/** - * File Utils are helper routines used by putSmallFile and getSmallFile - * RPCs. - */ -public final class SmallFileUtils { - /** - * Never Constructed. - */ - private SmallFileUtils() { - } - - /** - * Gets a response for the putSmallFile RPC. 
- * @param msg - ContainerCommandRequestProto - * @return - ContainerCommandResponseProto - */ - public static ContainerCommandResponseProto getPutFileResponseSuccess( - ContainerCommandRequestProto msg, BlockData blockData) { - ContainerProtos.PutSmallFileResponseProto.Builder getResponse = - ContainerProtos.PutSmallFileResponseProto.newBuilder(); - ContainerProtos.BlockData blockDataProto = blockData.getProtoBufMessage(); - ContainerProtos.GetCommittedBlockLengthResponseProto.Builder - committedBlockLengthResponseBuilder = BlockUtils - .getCommittedBlockLengthResponseBuilder(blockDataProto.getSize(), - blockDataProto.getBlockID()); - getResponse.setCommittedBlockLength(committedBlockLengthResponseBuilder); - ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setCmdType(ContainerProtos.Type.PutSmallFile); - builder.setPutSmallFile(getResponse); - return builder.build(); - } - - /** - * Gets a response to the read small file call. - * @param msg - Msg - * @param data - Data - * @param info - Info - * @return Response. - */ - public static ContainerCommandResponseProto getGetSmallFileResponseSuccess( - ContainerCommandRequestProto msg, byte[] data, ChunkInfo info) { - Preconditions.checkNotNull(msg); - - ContainerProtos.ReadChunkResponseProto.Builder readChunkresponse = - ContainerProtos.ReadChunkResponseProto.newBuilder(); - readChunkresponse.setChunkData(info.getProtoBufMessage()); - readChunkresponse.setData(ByteString.copyFrom(data)); - readChunkresponse.setBlockID(msg.getGetSmallFile().getBlock().getBlockID()); - - ContainerProtos.GetSmallFileResponseProto.Builder getSmallFile = - ContainerProtos.GetSmallFileResponseProto.newBuilder(); - getSmallFile.setData(readChunkresponse.build()); - ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setCmdType(ContainerProtos.Type.GetSmallFile); - builder.setGetSmallFile(getSmallFile); - return builder.build(); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java deleted file mode 100644 index 041f485deae49..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.helpers; -/** - This package contains utility classes for KeyValue container type. 
- **/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java deleted file mode 100644 index 4272861c57e2d..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.impl; - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; - -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache; -import org.apache.hadoop.hdds.utils.BatchOperation; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH; -/** - * This class is for performing block related operations on the KeyValue - * Container. - */ -public class BlockManagerImpl implements BlockManager { - - static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class); - private static byte[] blockCommitSequenceIdKey = - DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX); - - private Configuration config; - - /** - * Constructs a Block Manager. - * - * @param conf - Ozone configuration - */ - public BlockManagerImpl(Configuration conf) { - Preconditions.checkNotNull(conf, "Config cannot be null"); - this.config = conf; - } - - /** - * Puts or overwrites a block. 
- * - * @param container - Container for which block need to be added. - * @param data - BlockData. - * @return length of the block. - * @throws IOException - */ - public long putBlock(Container container, BlockData data) throws IOException { - Preconditions.checkNotNull(data, "BlockData cannot be null for put " + - "operation."); - Preconditions.checkState(data.getContainerID() >= 0, "Container Id " + - "cannot be negative"); - // We are not locking the key manager since LevelDb serializes all actions - // against a single DB. We rely on DB level locking to avoid conflicts. - try(ReferenceCountedDB db = BlockUtils. - getDB((KeyValueContainerData) container.getContainerData(), config)) { - // This is a post condition that acts as a hint to the user. - // Should never fail. - Preconditions.checkNotNull(db, "DB cannot be null here"); - - long bcsId = data.getBlockCommitSequenceId(); - long containerBCSId = ((KeyValueContainerData) container. - getContainerData()).getBlockCommitSequenceId(); - - // default blockCommitSequenceId for any block is 0. It the putBlock - // request is not coming via Ratis(for test scenarios), it will be 0. - // In such cases, we should overwrite the block as well - if (bcsId != 0) { - if (bcsId <= containerBCSId) { - // Since the blockCommitSequenceId stored in the db is greater than - // equal to blockCommitSequenceId to be updated, it means the putBlock - // transaction is reapplied in the ContainerStateMachine on restart. - // It also implies that the given block must already exist in the db. - // just log and return - LOG.warn("blockCommitSequenceId " + containerBCSId - + " in the Container Db is greater than" + " the supplied value " - + bcsId + " .Ignoring it"); - return data.getSize(); - } - } - // update the blockData as well as BlockCommitSequenceId here - BatchOperation batch = new BatchOperation(); - batch.put(Longs.toByteArray(data.getLocalID()), - data.getProtoBufMessage().toByteArray()); - batch.put(blockCommitSequenceIdKey, - Longs.toByteArray(bcsId)); - db.getStore().writeBatch(batch); - container.updateBlockCommitSequenceId(bcsId); - // Increment keycount here - container.getContainerData().incrKeyCount(); - if (LOG.isDebugEnabled()) { - LOG.debug( - "Block " + data.getBlockID() + " successfully committed with bcsId " - + bcsId + " chunk size " + data.getChunks().size()); - } - return data.getSize(); - } - } - - /** - * Gets an existing block. - * - * @param container - Container from which block need to be fetched. - * @param blockID - BlockID of the block. - * @return Key Data. - * @throws IOException - */ - @Override - public BlockData getBlock(Container container, BlockID blockID) - throws IOException { - long bcsId = blockID.getBlockCommitSequenceId(); - Preconditions.checkNotNull(blockID, - "BlockID cannot be null in GetBlock request"); - Preconditions.checkNotNull(container, - "Container cannot be null"); - - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { - // This is a post condition that acts as a hint to the user. - // Should never fail. 
- Preconditions.checkNotNull(db, "DB cannot be null here"); - - long containerBCSId = containerData.getBlockCommitSequenceId(); - if (containerBCSId < bcsId) { - throw new StorageContainerException( - "Unable to find the block with bcsID " + bcsId + " .Container " - + container.getContainerData().getContainerID() + " bcsId is " - + containerBCSId + ".", UNKNOWN_BCSID); - } - byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID())); - if (kData == null) { - throw new StorageContainerException("Unable to find the block." + - blockID, NO_SUCH_BLOCK); - } - ContainerProtos.BlockData blockData = - ContainerProtos.BlockData.parseFrom(kData); - long id = blockData.getBlockID().getBlockCommitSequenceId(); - if (id < bcsId) { - throw new StorageContainerException( - "bcsId " + bcsId + " mismatches with existing block Id " - + id + " for block " + blockID + ".", BCSID_MISMATCH); - } - return BlockData.getFromProtoBuf(blockData); - } - } - - /** - * Returns the length of the committed block. - * - * @param container - Container from which block need to be fetched. - * @param blockID - BlockID of the block. - * @return length of the block. - * @throws IOException in case, the block key does not exist in db. - */ - @Override - public long getCommittedBlockLength(Container container, BlockID blockID) - throws IOException { - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { - // This is a post condition that acts as a hint to the user. - // Should never fail. - Preconditions.checkNotNull(db, "DB cannot be null here"); - byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID())); - if (kData == null) { - throw new StorageContainerException("Unable to find the block.", - NO_SUCH_BLOCK); - } - ContainerProtos.BlockData blockData = - ContainerProtos.BlockData.parseFrom(kData); - return blockData.getSize(); - } - } - - /** - * Deletes an existing block. - * - * @param container - Container from which block need to be deleted. - * @param blockID - ID of the block. - * @throws StorageContainerException - */ - public void deleteBlock(Container container, BlockID blockID) throws - IOException { - Preconditions.checkNotNull(blockID, "block ID cannot be null."); - Preconditions.checkState(blockID.getContainerID() >= 0, - "Container ID cannot be negative."); - Preconditions.checkState(blockID.getLocalID() >= 0, - "Local ID cannot be negative."); - - KeyValueContainerData cData = (KeyValueContainerData) container - .getContainerData(); - try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) { - // This is a post condition that acts as a hint to the user. - // Should never fail. - Preconditions.checkNotNull(db, "DB cannot be null here"); - // Note : There is a race condition here, since get and delete - // are not atomic. Leaving it here since the impact is refusing - // to delete a Block which might have just gotten inserted after - // the get check. - byte[] kKey = Longs.toByteArray(blockID.getLocalID()); - - byte[] kData = db.getStore().get(kKey); - if (kData == null) { - throw new StorageContainerException("Unable to find the block.", - NO_SUCH_BLOCK); - } - db.getStore().delete(kKey); - // Decrement blockcount here - container.getContainerData().decrKeyCount(); - } - } - - /** - * List blocks in a container. - * - * @param container - Container from which blocks need to be listed. - * @param startLocalID - Key to start from, 0 to begin. 
- * @param count - Number of blocks to return. - * @return List of Blocks that match the criteria. - */ - @Override - public List listBlock(Container container, long startLocalID, int - count) throws IOException { - Preconditions.checkNotNull(container, "container cannot be null"); - Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be " + - "negative"); - Preconditions.checkArgument(count > 0, - "Count must be a positive number."); - container.readLock(); - try { - List result = null; - KeyValueContainerData cData = - (KeyValueContainerData) container.getContainerData(); - try (ReferenceCountedDB db = BlockUtils.getDB(cData, config)) { - result = new ArrayList<>(); - byte[] startKeyInBytes = Longs.toByteArray(startLocalID); - List> range = db.getStore() - .getSequentialRangeKVs(startKeyInBytes, count, - MetadataKeyFilters.getNormalKeyFilter()); - for (Map.Entry entry : range) { - BlockData value = BlockUtils.getBlockData(entry.getValue()); - BlockData data = new BlockData(value.getBlockID()); - result.add(data); - } - return result; - } - } finally { - container.readUnlock(); - } - } - - /** - * Shutdown KeyValueContainerManager. - */ - public void shutdown() { - BlockUtils.shutdownCache(ContainerCache.getInstance(config)); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java deleted file mode 100644 index fa9e205786e00..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*; - -/** - * Implementation of ChunkManager built for running performance tests. - * Chunks are not written to disk, Reads are returned with zero-filled buffers - */ -public class ChunkManagerDummyImpl extends ChunkManagerImpl { - static final Logger LOG = LoggerFactory.getLogger( - ChunkManagerDummyImpl.class); - - public ChunkManagerDummyImpl(boolean sync) { - super(sync); - } - - /** - * writes a given chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block - * @param info - ChunkInfo - * @param data - data of the chunk - * @param dispatcherContext - dispatcherContextInfo - * @throws StorageContainerException - */ - @Override - public void writeChunk(Container container, BlockID blockID, ChunkInfo info, - ByteBuffer data, DispatcherContext dispatcherContext) - throws StorageContainerException { - long writeTimeStart = Time.monotonicNow(); - - Preconditions.checkNotNull(dispatcherContext); - DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage(); - - Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - - try { - KeyValueContainerData containerData = - (KeyValueContainerData) container.getContainerData(); - HddsVolume volume = containerData.getVolume(); - VolumeIOStats volumeIOStats = volume.getVolumeIOStats(); - int bufferSize; - - switch (stage) { - case WRITE_DATA: - bufferSize = data.capacity(); - if (bufferSize != info.getLen()) { - String err = String.format("data array does not match the length " - + "specified. DataLen: %d Byte Array: %d", - info.getLen(), bufferSize); - log.error(err); - throw new StorageContainerException(err, INVALID_WRITE_SIZE); - } - - // Increment volumeIO stats here. - volumeIOStats.incWriteTime(Time.monotonicNow() - writeTimeStart); - volumeIOStats.incWriteOpCount(); - volumeIOStats.incWriteBytes(info.getLen()); - break; - case COMMIT_DATA: - updateContainerWriteStats(container, info, false); - break; - case COMBINED: - updateContainerWriteStats(container, info, false); - break; - default: - throw new IOException("Can not identify write operation."); - } - } catch (IOException ex) { - LOG.error("write data failed. error: {}", ex); - throw new StorageContainerException("Internal error: ", ex, - CONTAINER_INTERNAL_ERROR); - } - } - - /** - * return a zero-filled buffer. - * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - ChunkInfo. - * @param dispatcherContext dispatcher context info. - * @return byte array - * TODO: Right now we do not support partial reads and writes of chunks. 
- * TODO: Explore if we need to do that for ozone. - */ - @Override - public ByteBuffer readChunk(Container container, BlockID blockID, - ChunkInfo info, DispatcherContext dispatcherContext) { - - long readStartTime = Time.monotonicNow(); - - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - ByteBuffer data; - HddsVolume volume = containerData.getVolume(); - VolumeIOStats volumeIOStats = volume.getVolumeIOStats(); - - data = ByteBuffer.allocate((int) info.getLen()); - - // Increment volumeIO stats here. - volumeIOStats.incReadTime(Time.monotonicNow() - readStartTime); - volumeIOStats.incReadOpCount(); - volumeIOStats.incReadBytes(info.getLen()); - - return data; - } - - /** - * Delete a given chunk - Do nothing except stats. - * - * @param container - Container for the chunk - * @param blockID - ID of the block - * @param info - Chunk Info - */ - @Override - public void deleteChunk(Container container, BlockID blockID, - ChunkInfo info) { - Preconditions.checkNotNull(blockID, "Block ID cannot be null."); - KeyValueContainerData containerData = - (KeyValueContainerData) container.getContainerData(); - - if (info.getOffset() == 0) { - containerData.decrBytesUsed(info.getLen()); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java deleted file mode 100644 index 85495783cc833..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_SCRUB_ENABLED; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_SCRUB_ENABLED_DEFAULT; - -/** - * Select an appropriate ChunkManager implementation as per config setting. 
- * Ozone ChunkManager is a Singleton - */ -public final class ChunkManagerFactory { - static final Logger LOG = LoggerFactory.getLogger(ChunkManagerFactory.class); - - private static volatile ChunkManager instance = null; - private static boolean syncChunks = false; - - private ChunkManagerFactory() { - } - - public static ChunkManager getChunkManager(Configuration config, - boolean sync) { - if (instance == null) { - synchronized (ChunkManagerFactory.class) { - if (instance == null) { - instance = createChunkManager(config, sync); - syncChunks = sync; - } - } - } - - Preconditions.checkArgument((syncChunks == sync), - "value of sync conflicts with previous invocation"); - return instance; - } - - private static ChunkManager createChunkManager(Configuration config, - boolean sync) { - ChunkManager manager = null; - boolean persist = config.getBoolean(HDDS_CONTAINER_PERSISTDATA, - HDDS_CONTAINER_PERSISTDATA_DEFAULT); - - if (!persist) { - boolean scrubber = config.getBoolean( - HDDS_CONTAINER_SCRUB_ENABLED, - HDDS_CONTAINER_SCRUB_ENABLED_DEFAULT); - if (scrubber) { - // Data Scrubber needs to be disabled for non-persistent chunks. - LOG.warn("Failed to set " + HDDS_CONTAINER_PERSISTDATA + " to false." - + " Please set " + HDDS_CONTAINER_SCRUB_ENABLED - + " also to false to enable non-persistent containers."); - persist = true; - } - } - - if (persist) { - manager = new ChunkManagerImpl(sync); - } else { - LOG.warn(HDDS_CONTAINER_PERSISTDATA - + " is set to false. This should be used only for testing." - + " All user data will be discarded."); - manager = new ChunkManagerDummyImpl(sync); - } - - return manager; - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java deleted file mode 100644 index e22841eec8a49..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java +++ /dev/null @@ -1,312 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; -import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.nio.file.StandardCopyOption; -import java.security.NoSuchAlgorithmException; -import java.util.concurrent.ExecutionException; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_INTERNAL_ERROR; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.NO_SUCH_ALGORITHM; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; - -/** - * This class is for performing chunk related operations. - */ -public class ChunkManagerImpl implements ChunkManager { - static final Logger LOG = LoggerFactory.getLogger(ChunkManagerImpl.class); - private final boolean doSyncWrite; - - public ChunkManagerImpl(boolean sync) { - doSyncWrite = sync; - } - - /** - * writes a given chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block - * @param info - ChunkInfo - * @param data - data of the chunk - * @param dispatcherContext - dispatcherContextInfo - * @throws StorageContainerException - */ - public void writeChunk(Container container, BlockID blockID, ChunkInfo info, - ByteBuffer data, DispatcherContext dispatcherContext) - throws StorageContainerException { - Preconditions.checkNotNull(dispatcherContext); - DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage(); - try { - - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - HddsVolume volume = containerData.getVolume(); - VolumeIOStats volumeIOStats = volume.getVolumeIOStats(); - - File chunkFile = ChunkUtils.getChunkFile(containerData, info); - - boolean isOverwrite = ChunkUtils.validateChunkForOverwrite( - chunkFile, info); - File tmpChunkFile = getTmpChunkFile(chunkFile, dispatcherContext); - if (LOG.isDebugEnabled()) { - LOG.debug( - "writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file:{}", - info.getChunkName(), stage, chunkFile, tmpChunkFile); - } - - switch (stage) { - case WRITE_DATA: - if (isOverwrite) { - // if the actual chunk file already exists here while writing the temp - // chunk file, then it means the same ozone client request has - // generated two raft log entries. This can happen either because - // retryCache expired in Ratis (or log index mismatch/corruption in - // Ratis). This can be solved by two approaches as of now: - // 1. 
Read the complete data in the actual chunk file , - // verify the data integrity and in case it mismatches , either - // 2. Delete the chunk File and write the chunk again. For now, - // let's rewrite the chunk file - // TODO: once the checksum support for write chunks gets plugged in, - // the checksum needs to be verified for the actual chunk file and - // the data to be written here which should be efficient and - // it matches we can safely return without rewriting. - LOG.warn("ChunkFile already exists" + chunkFile + ".Deleting it."); - FileUtil.fullyDelete(chunkFile); - } - if (tmpChunkFile.exists()) { - // If the tmp chunk file already exists it means the raft log got - // appended, but later on the log entry got truncated in Ratis leaving - // behind garbage. - // TODO: once the checksum support for data chunks gets plugged in, - // instead of rewriting the chunk here, let's compare the checkSums - LOG.warn( - "tmpChunkFile already exists" + tmpChunkFile + "Overwriting it."); - } - // Initially writes to temporary chunk file. - ChunkUtils - .writeData(tmpChunkFile, info, data, volumeIOStats, doSyncWrite); - // No need to increment container stats here, as still data is not - // committed here. - break; - case COMMIT_DATA: - // commit the data, means move chunk data from temporary chunk file - // to actual chunk file. - if (isOverwrite) { - // if the actual chunk file already exists , it implies the write - // chunk transaction in the containerStateMachine is getting - // reapplied. This can happen when a node restarts. - // TODO: verify the checkSums for the existing chunkFile and the - // chunkInfo to be committed here - LOG.warn("ChunkFile already exists" + chunkFile); - return; - } - // While committing a chunk , just rename the tmp chunk file which has - // the same term and log index appended as the current transaction - commitChunk(tmpChunkFile, chunkFile); - // Increment container stats here, as we commit the data. - updateContainerWriteStats(container, info, isOverwrite); - break; - case COMBINED: - // directly write to the chunk file - ChunkUtils.writeData(chunkFile, info, data, volumeIOStats, doSyncWrite); - updateContainerWriteStats(container, info, isOverwrite); - break; - default: - throw new IOException("Can not identify write operation."); - } - } catch (StorageContainerException ex) { - throw ex; - } catch (NoSuchAlgorithmException ex) { - LOG.error("write data failed. error: {}", ex); - throw new StorageContainerException("Internal error: ", ex, - NO_SUCH_ALGORITHM); - } catch (ExecutionException | IOException ex) { - LOG.error("write data failed. error: {}", ex); - throw new StorageContainerException("Internal error: ", ex, - CONTAINER_INTERNAL_ERROR); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - LOG.error("write data failed. error: {}", e); - throw new StorageContainerException("Internal error: ", e, - CONTAINER_INTERNAL_ERROR); - } - } - - protected void updateContainerWriteStats(Container container, ChunkInfo info, - boolean isOverwrite) { - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - - if (!isOverwrite) { - containerData.incrBytesUsed(info.getLen()); - } - containerData.incrWriteCount(); - containerData.incrWriteBytes(info.getLen()); - } - - /** - * reads the data defined by a chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - ChunkInfo. - * @param dispatcherContext dispatcher context info. 
- * @return byte array - * @throws StorageContainerException - * TODO: Right now we do not support partial reads and writes of chunks. - * TODO: Explore if we need to do that for ozone. - */ - public ByteBuffer readChunk(Container container, BlockID blockID, - ChunkInfo info, DispatcherContext dispatcherContext) - throws StorageContainerException { - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - ByteBuffer data; - HddsVolume volume = containerData.getVolume(); - VolumeIOStats volumeIOStats = volume.getVolumeIOStats(); - - // Checking here, which layout version the container is, and reading - // the chunk file in that format. - // In version1, we verify checksum if it is available and return data - // of the chunk file. - if (containerData.getLayOutVersion() == ChunkLayOutVersion - .getLatestVersion().getVersion()) { - File chunkFile = ChunkUtils.getChunkFile(containerData, info); - - // In case the chunk file does not exist but tmp chunk file exist, - // read from tmp chunk file if readFromTmpFile is set to true - if (!chunkFile.exists() && dispatcherContext != null - && dispatcherContext.isReadFromTmpFile()) { - chunkFile = getTmpChunkFile(chunkFile, dispatcherContext); - } - data = ChunkUtils.readData(chunkFile, info, volumeIOStats); - containerData.incrReadCount(); - long length = chunkFile.length(); - containerData.incrReadBytes(length); - return data; - } - return null; - } - - /** - * Deletes a given chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block - * @param info - Chunk Info - * @throws StorageContainerException - */ - public void deleteChunk(Container container, BlockID blockID, ChunkInfo info) - throws StorageContainerException { - Preconditions.checkNotNull(blockID, "Block ID cannot be null."); - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - // Checking here, which layout version the container is, and performing - // deleting chunk operation. - // In version1, we have only chunk file. - if (containerData.getLayOutVersion() == ChunkLayOutVersion - .getLatestVersion().getVersion()) { - File chunkFile = ChunkUtils.getChunkFile(containerData, info); - - // if the chunk file does not exist, it might have already been deleted. - // The call might be because of reapply of transactions on datanode - // restart. - if (!chunkFile.exists()) { - LOG.warn("Chunk file doe not exist. chunk info :" + info.toString()); - return; - } - if ((info.getOffset() == 0) && (info.getLen() == chunkFile.length())) { - FileUtil.fullyDelete(chunkFile); - containerData.decrBytesUsed(chunkFile.length()); - } else { - LOG.error("Not Supported Operation. Trying to delete a " + - "chunk that is in shared file. chunk info : " + info.toString()); - throw new StorageContainerException("Not Supported Operation. " + - "Trying to delete a chunk that is in shared file. chunk info : " - + info.toString(), UNSUPPORTED_REQUEST); - } - } - } - - /** - * Shutdown the chunkManager. - * - * In the chunkManager we haven't acquired any resources, so nothing to do - * here. - */ - - public void shutdown() { - //TODO: need to revisit this during integration of container IO. - } - - /** - * Returns the temporary chunkFile path. 
- * @param chunkFile chunkFileName - * @param dispatcherContext dispatcher context info - * @return temporary chunkFile path - * @throws StorageContainerException - */ - private File getTmpChunkFile(File chunkFile, - DispatcherContext dispatcherContext) { - return new File(chunkFile.getParent(), - chunkFile.getName() + - OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + - OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX + - OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + - dispatcherContext.getTerm() + - OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + - dispatcherContext.getLogIndex()); - } - - /** - * Commit the chunk by renaming the temporary chunk file to chunk file. - * @param tmpChunkFile - * @param chunkFile - * @throws IOException - */ - private void commitChunk(File tmpChunkFile, File chunkFile) throws - IOException { - Files.move(tmpChunkFile.toPath(), chunkFile.toPath(), - StandardCopyOption.REPLACE_EXISTING); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java deleted file mode 100644 index 564b50e8a4d27..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.impl; -/** - * Chunk manager and block manager implementations for keyvalue container type. - */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java deleted file mode 100644 index 6812b0d8ff8e5..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.interfaces; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.interfaces.Container; - -import java.io.IOException; -import java.util.List; - -/** - * BlockManager is for performing key related operations on the container. - */ -public interface BlockManager { - - /** - * Puts or overwrites a block. - * - * @param container - Container for which block need to be added. - * @param data - Block Data. - * @return length of the Block. - * @throws IOException - */ - long putBlock(Container container, BlockData data) throws IOException; - - /** - * Gets an existing block. - * - * @param container - Container from which block need to be get. - * @param blockID - BlockID of the Block. - * @return Block Data. - * @throws IOException - */ - BlockData getBlock(Container container, BlockID blockID) - throws IOException; - - /** - * Deletes an existing block. - * - * @param container - Container from which block need to be deleted. - * @param blockID - ID of the block. - * @throws StorageContainerException - */ - void deleteBlock(Container container, BlockID blockID) throws IOException; - - /** - * List blocks in a container. - * - * @param container - Container from which blocks need to be listed. - * @param startLocalID - Block to start from, 0 to begin. - * @param count - Number of blocks to return. - * @return List of Blocks that match the criteria. - */ - List listBlock(Container container, long startLocalID, int count) - throws IOException; - - /** - * Returns the last committed block length for the block. - * @param blockID blockId - */ - long getCommittedBlockLength(Container container, BlockID blockID) - throws IOException; - - /** - * Shutdown ContainerManager. - */ - void shutdown(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java deleted file mode 100644 index 5adb6415ec1c2..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java +++ /dev/null @@ -1,83 +0,0 @@ -package org.apache.hadoop.ozone.container.keyvalue.interfaces; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; - -import java.nio.ByteBuffer; - -/** - * Chunk Manager allows read, write, delete and listing of chunks in - * a container. - */ - -public interface ChunkManager { - - /** - * writes a given chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - ChunkInfo. - * @param dispatcherContext - dispatcher context info. - * @throws StorageContainerException - */ - void writeChunk(Container container, BlockID blockID, ChunkInfo info, - ByteBuffer data, DispatcherContext dispatcherContext) - throws StorageContainerException; - - /** - * reads the data defined by a chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - ChunkInfo. - * @param dispatcherContext - dispatcher context info. - * @return byte array - * @throws StorageContainerException - * - * TODO: Right now we do not support partial reads and writes of chunks. - * TODO: Explore if we need to do that for ozone. - */ - ByteBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, - DispatcherContext dispatcherContext) throws StorageContainerException; - - /** - * Deletes a given chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - Chunk Info - * @throws StorageContainerException - */ - void deleteChunk(Container container, BlockID blockID, ChunkInfo info) throws - StorageContainerException; - - // TODO : Support list operations. - - /** - * Shutdown the chunkManager. - */ - void shutdown(); - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java deleted file mode 100644 index 512909451f0d8..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.interfaces; -/** - * Chunk manager and block manager interfaces for keyvalue container type. 
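The chunk write path removed above is a two-stage commit: the WRITE_DATA stage streams the bytes into a temporary file named with the Ratis term and log index, and the COMMIT_DATA stage renames that file over the final chunk file. A minimal, self-contained sketch of that rename-based commit follows; the class name, method names and the ".tmp" naming scheme are illustrative stand-ins, not the actual Ozone helpers.

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.StandardCopyOption;

    // Illustrative sketch of the two-stage chunk write described above.
    final class ChunkCommitSketch {

      // The temporary file sits next to the final chunk file and carries the
      // Ratis term and log index, so a truncated log entry leaves behind an
      // identifiable tmp file instead of a half-written chunk.
      static File tmpChunkFile(File chunkFile, long term, long logIndex) {
        return new File(chunkFile.getParent(),
            chunkFile.getName() + ".tmp." + term + "." + logIndex);
      }

      // COMMIT_DATA stage: replace whatever is at the final path with the
      // staged data via a same-directory rename.
      static void commit(File tmpChunkFile, File chunkFile) throws IOException {
        Files.move(tmpChunkFile.toPath(), chunkFile.toPath(),
            StandardCopyOption.REPLACE_EXISTING);
      }
    }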
- */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java deleted file mode 100644 index 53c9f1e0f9712..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue; -/** - This package contains classes for KeyValue container type. - **/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java deleted file mode 100644 index bc3f51a54ef5f..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ /dev/null @@ -1,332 +0,0 @@ - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.statemachine.background; - -import com.google.common.collect.Lists; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.util.ReflectionUtils; -import org.apache.ratis.thirdparty.com.google.protobuf - .InvalidProtocolBufferException; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.hdds.utils.BackgroundTask; -import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; -import org.apache.hadoop.hdds.utils.BackgroundTaskResult; -import org.apache.hadoop.hdds.utils.BatchOperation; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT; - -/** - * A per-datanode container block deleting service takes in charge - * of deleting staled ozone blocks. 
- */ -// TODO: Fix BlockDeletingService to work with new StorageLayer -public class BlockDeletingService extends BackgroundService { - - private static final Logger LOG = - LoggerFactory.getLogger(BlockDeletingService.class); - - private OzoneContainer ozoneContainer; - private ContainerDeletionChoosingPolicy containerDeletionPolicy; - private final Configuration conf; - - // Throttle number of blocks to delete per task, - // set to 1 for testing - private final int blockLimitPerTask; - - // Throttle the number of containers to process concurrently at a time, - private final int containerLimitPerInterval; - - // Task priority is useful when a to-delete block has weight. - private final static int TASK_PRIORITY_DEFAULT = 1; - // Core pool size for container tasks - private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10; - - public BlockDeletingService(OzoneContainer ozoneContainer, - long serviceInterval, long serviceTimeout, TimeUnit timeUnit, - Configuration conf) { - super("BlockDeletingService", serviceInterval, timeUnit, - BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); - this.ozoneContainer = ozoneContainer; - containerDeletionPolicy = ReflectionUtils.newInstance(conf.getClass( - ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, - TopNOrderedContainerDeletionChoosingPolicy.class, - ContainerDeletionChoosingPolicy.class), conf); - this.conf = conf; - this.blockLimitPerTask = - conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, - OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT); - this.containerLimitPerInterval = - conf.getInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, - OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT); - } - - - @Override - public BackgroundTaskQueue getTasks() { - BackgroundTaskQueue queue = new BackgroundTaskQueue(); - List containers = Lists.newArrayList(); - try { - // We at most list a number of containers a time, - // in case there are too many containers and start too many workers. - // We must ensure there is no empty container in this result. - // The chosen result depends on what container deletion policy is - // configured. - containers = chooseContainerForBlockDeletion(containerLimitPerInterval, - containerDeletionPolicy); - if (containers.size() > 0) { - LOG.info("Plan to choose {} containers for block deletion, " - + "actually returns {} valid containers.", - containerLimitPerInterval, containers.size()); - } - - for(ContainerData container : containers) { - BlockDeletingTask containerTask = - new BlockDeletingTask(container, TASK_PRIORITY_DEFAULT); - queue.add(containerTask); - } - } catch (StorageContainerException e) { - LOG.warn("Failed to initiate block deleting tasks, " - + "caused by unable to get containers info. " - + "Retry in next interval. ", e); - } catch (Exception e) { - // In case listContainer call throws any uncaught RuntimeException. 
- if (LOG.isDebugEnabled()) { - LOG.debug("Unexpected error occurs during deleting blocks.", e); - } - } - return queue; - } - - public List chooseContainerForBlockDeletion(int count, - ContainerDeletionChoosingPolicy deletionPolicy) - throws StorageContainerException { - Map containerDataMap = - ozoneContainer.getContainerSet().getContainerMap().entrySet().stream() - .filter(e -> isDeletionAllowed(e.getValue().getContainerData(), - deletionPolicy)).collect(Collectors - .toMap(Map.Entry::getKey, e -> e.getValue().getContainerData())); - return deletionPolicy - .chooseContainerForBlockDeletion(count, containerDataMap); - } - - private boolean isDeletionAllowed(ContainerData containerData, - ContainerDeletionChoosingPolicy deletionPolicy) { - if (!deletionPolicy - .isValidContainerType(containerData.getContainerType())) { - return false; - } else if (!containerData.isClosed()) { - return false; - } else { - if (ozoneContainer.getWriteChannel() instanceof XceiverServerRatis) { - XceiverServerRatis ratisServer = - (XceiverServerRatis) ozoneContainer.getWriteChannel(); - PipelineID pipelineID = PipelineID - .valueOf(UUID.fromString(containerData.getOriginPipelineId())); - // in case te ratis group does not exist, just mark it for deletion. - if (!ratisServer.isExist(pipelineID.getProtobuf())) { - return true; - } - try { - long minReplicatedIndex = - ratisServer.getMinReplicatedIndex(pipelineID); - long containerBCSID = containerData.getBlockCommitSequenceId(); - if (minReplicatedIndex >= 0 && minReplicatedIndex < containerBCSID) { - LOG.warn("Close Container log Index {} is not replicated across all" - + "the servers in the pipeline {} as the min replicated " - + "index is {}. Deletion is not allowed in this container " - + "yet.", containerBCSID, - containerData.getOriginPipelineId(), minReplicatedIndex); - return false; - } else { - return true; - } - } catch (IOException ioe) { - // in case of any exception check again whether the pipeline exist - // and in case the pipeline got destroyed, just mark it for deletion - if (!ratisServer.isExist(pipelineID.getProtobuf())) { - return true; - } else { - LOG.info(ioe.getMessage()); - return false; - } - } - } - return true; - } - } - - private static class ContainerBackgroundTaskResult - implements BackgroundTaskResult { - private List deletedBlockIds; - - ContainerBackgroundTaskResult() { - deletedBlockIds = new LinkedList<>(); - } - - public void addBlockId(String blockId) { - deletedBlockIds.add(blockId); - } - - public void addAll(List blockIds) { - deletedBlockIds.addAll(blockIds); - } - - public List getDeletedBlocks() { - return deletedBlockIds; - } - - @Override - public int getSize() { - return deletedBlockIds.size(); - } - } - - private class BlockDeletingTask - implements BackgroundTask { - - private final int priority; - private final KeyValueContainerData containerData; - - BlockDeletingTask(ContainerData containerName, int priority) { - this.priority = priority; - this.containerData = (KeyValueContainerData) containerName; - } - - @Override - public BackgroundTaskResult call() throws Exception { - ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult(); - final Container container = ozoneContainer.getContainerSet() - .getContainer(containerData.getContainerID()); - container.writeLock(); - long startTime = Time.monotonicNow(); - // Scan container's db and get list of under deletion blocks - try (ReferenceCountedDB meta = BlockUtils.getDB(containerData, conf)) { - // # of blocks to delete is throttled - KeyPrefixFilter 
filter = - new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX); - List> toDeleteBlocks = - meta.getStore().getSequentialRangeKVs(null, blockLimitPerTask, - filter); - if (toDeleteBlocks.isEmpty()) { - LOG.debug("No under deletion block found in container : {}", - containerData.getContainerID()); - } - - List succeedBlocks = new LinkedList<>(); - LOG.debug("Container : {}, To-Delete blocks : {}", - containerData.getContainerID(), toDeleteBlocks.size()); - File dataDir = new File(containerData.getChunksPath()); - if (!dataDir.exists() || !dataDir.isDirectory()) { - LOG.error("Invalid container data dir {} : " - + "does not exist or not a directory", dataDir.getAbsolutePath()); - return crr; - } - - toDeleteBlocks.forEach(entry -> { - String blockName = DFSUtil.bytes2String(entry.getKey()); - LOG.debug("Deleting block {}", blockName); - try { - ContainerProtos.BlockData data = - ContainerProtos.BlockData.parseFrom(entry.getValue()); - for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) { - File chunkFile = dataDir.toPath() - .resolve(chunkInfo.getChunkName()).toFile(); - if (FileUtils.deleteQuietly(chunkFile)) { - if (LOG.isDebugEnabled()) { - LOG.debug("block {} chunk {} deleted", blockName, - chunkFile.getAbsolutePath()); - } - } - } - succeedBlocks.add(blockName); - } catch (InvalidProtocolBufferException e) { - LOG.error("Failed to parse block info for block {}", blockName, e); - } - }); - - // Once files are deleted... replace deleting entries with deleted - // entries - BatchOperation batch = new BatchOperation(); - succeedBlocks.forEach(entry -> { - String blockId = - entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length()); - String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId; - batch.put(DFSUtil.string2Bytes(deletedEntry), - DFSUtil.string2Bytes(blockId)); - batch.delete(DFSUtil.string2Bytes(entry)); - }); - meta.getStore().writeBatch(batch); - // update count of pending deletion blocks in in-memory container status - containerData.decrPendingDeletionBlocks(succeedBlocks.size()); - - if (!succeedBlocks.isEmpty()) { - LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms", - containerData.getContainerID(), succeedBlocks.size(), - Time.monotonicNow() - startTime); - } - crr.addAll(succeedBlocks); - return crr; - } finally { - container.writeUnlock(); - } - } - - @Override - public int getPriority() { - return priority; - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java deleted file mode 100644 index 69d80425ab7d9..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
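BlockDeletingTask, removed above, deletes a block's chunk files first and only afterwards flips the block's metadata key from the "deleting" prefix to the "deleted" prefix in one batch write, so a crash in between leaves the block marked for retry instead of orphaned. The sketch below reduces that key flip to a plain in-memory map standing in for the container DB; the prefix values are assumptions based on the constants referenced above.

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Simplified stand-in for the batch update at the end of a block
    // deleting task: for every block whose chunks were removed, drop the
    // "deleting" entry and record a "deleted" entry instead.
    final class BlockDeleteBatchSketch {
      static final String DELETING_PREFIX = "#deleting#"; // assumed value
      static final String DELETED_PREFIX = "#deleted#";   // assumed value

      static void markDeleted(Map<String, String> store,
          List<String> succeededKeys) {
        for (String deletingKey : succeededKeys) {
          String blockId = deletingKey.substring(DELETING_PREFIX.length());
          store.remove(deletingKey);
          store.put(DELETED_PREFIX + blockId, blockId);
        }
      }

      public static void main(String[] args) {
        Map<String, String> store = new LinkedHashMap<>();
        store.put(DELETING_PREFIX + "42", "blockData");
        List<String> done = new ArrayList<>();
        done.add(DELETING_PREFIX + "42");
        markDeleted(store, done);
        System.out.println(store); // prints {#deleted#42=42}
      }
    }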
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.statemachine.background; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java deleted file mode 100644 index 8bbdec96695e2..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.hdds.protocol.datanode.proto - .ContainerProtos.ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto.State; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Iterator; -import java.util.Map; - -/** - * Control plane for container management in datanode. - */ -public class ContainerController { - - private final ContainerSet containerSet; - private final Map handlers; - - public ContainerController(final ContainerSet containerSet, - final Map handlers) { - this.containerSet = containerSet; - this.handlers = handlers; - } - - /** - * Returns the Container given a container id. - * - * @param containerId ID of the container - * @return Container - */ - public Container getContainer(final long containerId) { - return containerSet.getContainer(containerId); - } - - /** - * Marks the container for closing. Moves the container to CLOSING state. 
- * - * @param containerId Id of the container to update - * @throws IOException in case of exception - */ - public void markContainerForClose(final long containerId) - throws IOException { - Container container = containerSet.getContainer(containerId); - - if (container.getContainerState() == State.OPEN) { - getHandler(container).markContainerForClose(container); - } - } - - /** - * Marks the container as UNHEALTHY. - * - * @param containerId Id of the container to update - * @throws IOException in case of exception - */ - public void markContainerUnhealthy(final long containerId) - throws IOException { - Container container = containerSet.getContainer(containerId); - getHandler(container).markContainerUnhealthy(container); - } - - /** - * Returns the container report. - * - * @return ContainerReportsProto - * @throws IOException in case of exception - */ - public ContainerReportsProto getContainerReport() - throws IOException { - return containerSet.getContainerReport(); - } - - /** - * Quasi closes a container given its id. - * - * @param containerId Id of the container to quasi close - * @throws IOException in case of exception - */ - public void quasiCloseContainer(final long containerId) throws IOException { - final Container container = containerSet.getContainer(containerId); - getHandler(container).quasiCloseContainer(container); - } - - /** - * Closes a container given its Id. - * - * @param containerId Id of the container to close - * @throws IOException in case of exception - */ - public void closeContainer(final long containerId) throws IOException { - final Container container = containerSet.getContainer(containerId); - getHandler(container).closeContainer(container); - } - - public Container importContainer(final ContainerType type, - final long containerId, final long maxSize, final String originPipelineId, - final String originNodeId, final InputStream rawContainerStream, - final TarContainerPacker packer) - throws IOException { - return handlers.get(type).importContainer(containerId, maxSize, - originPipelineId, originNodeId, rawContainerStream, packer); - } - - public void exportContainer(final ContainerType type, - final long containerId, final OutputStream outputStream, - final TarContainerPacker packer) throws IOException { - handlers.get(type).exportContainer( - containerSet.getContainer(containerId), outputStream, packer); - } - - /** - * Deletes a container given its Id. - * @param containerId Id of the container to be deleted - * @param force if this is set to true, we delete container without checking - * state of the container. - */ - public void deleteContainer(final long containerId, boolean force) - throws IOException { - final Container container = containerSet.getContainer(containerId); - if (container != null) { - getHandler(container).deleteContainer(container, force); - } - } - - /** - * Given a container, returns its handler instance. - * - * @param container Container - * @return handler of the container - */ - private Handler getHandler(final Container container) { - return handlers.get(container.getContainerType()); - } - - public Iterator> getContainers() { - return containerSet.getContainerIterator(); - } - - /** - * Return an iterator of containers which are associated with the specified - * volume. 
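ContainerController, whose removal starts above, is essentially a dispatcher: each operation looks up the Handler registered for the container's type and delegates to it. The reduced sketch below shows just that pattern; the enum, interface and method signatures are placeholders rather than the real Ozone types.

    import java.io.IOException;
    import java.util.EnumMap;
    import java.util.Map;

    // Minimal illustration of dispatch-by-container-type, loosely modelled
    // on ContainerController; types here are placeholders.
    final class ControllerSketch {
      enum ContainerType { KEY_VALUE }

      interface Handler {
        void closeContainer(long containerId) throws IOException;
      }

      private final Map<ContainerType, Handler> handlers =
          new EnumMap<>(ContainerType.class);

      ControllerSketch(Handler keyValueHandler) {
        handlers.put(ContainerType.KEY_VALUE, keyValueHandler);
      }

      // Every public operation resolves the handler for the container's
      // type and delegates, keeping the controller free of format details.
      void closeContainer(ContainerType type, long containerId)
          throws IOException {
        handlers.get(type).closeContainer(containerId);
      }
    }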
- * - * @param volume the HDDS volume which should be used to filter containers - * @return {@literal Iterator} - */ - public Iterator> getContainers(HddsVolume volume) { - return containerSet.getContainerIterator(volume); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java deleted file mode 100644 index 1141951dcc00f..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java +++ /dev/null @@ -1,178 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import java.io.IOException; -import java.util.Iterator; -import java.util.concurrent.TimeUnit; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * VolumeScanner scans a single volume. Each VolumeScanner has its own thread. - *

They are all managed by the DataNode's BlockScanner. - */ -public class ContainerDataScanner extends Thread { - public static final Logger LOG = - LoggerFactory.getLogger(ContainerDataScanner.class); - - /** - * The volume that we're scanning. - */ - private final HddsVolume volume; - private final ContainerController controller; - private final DataTransferThrottler throttler; - private final Canceler canceler; - private final ContainerDataScrubberMetrics metrics; - private final long dataScanInterval; - - /** - * True if the thread is stopping.

- * Protected by this object's lock. - */ - private volatile boolean stopping = false; - - - public ContainerDataScanner(ContainerScrubberConfiguration conf, - ContainerController controller, - HddsVolume volume) { - this.controller = controller; - this.volume = volume; - dataScanInterval = conf.getDataScanInterval(); - throttler = new HddsDataTransferThrottler(conf.getBandwidthPerVolume()); - canceler = new Canceler(); - metrics = ContainerDataScrubberMetrics.create(volume.toString()); - setName("ContainerDataScanner(" + volume + ")"); - setDaemon(true); - } - - @Override - public void run() { - if (LOG.isTraceEnabled()) { - LOG.trace("{}: thread starting.", this); - } - try { - while (!stopping) { - runIteration(); - metrics.resetNumContainersScanned(); - metrics.resetNumUnhealthyContainers(); - } - LOG.info("{} exiting.", this); - } catch (Throwable e) { - LOG.error("{} exiting because of exception ", this, e); - } finally { - if (metrics != null) { - metrics.unregister(); - } - } - } - - @VisibleForTesting - public void runIteration() { - long startTime = System.nanoTime(); - Iterator> itr = controller.getContainers(volume); - while (!stopping && itr.hasNext()) { - Container c = itr.next(); - if (c.shouldScanData()) { - try { - if (!c.scanData(throttler, canceler)) { - metrics.incNumUnHealthyContainers(); - controller.markContainerUnhealthy( - c.getContainerData().getContainerID()); - } - } catch (IOException ex) { - long containerId = c.getContainerData().getContainerID(); - LOG.warn("Unexpected exception while scanning container " - + containerId, ex); - } finally { - metrics.incNumContainersScanned(); - } - } - } - long totalDuration = System.nanoTime() - startTime; - if (!stopping) { - if (metrics.getNumContainersScanned() > 0) { - metrics.incNumScanIterations(); - LOG.info("Completed an iteration of container data scrubber in" + - " {} minutes." 
+ - " Number of iterations (since the data-node restart) : {}" + - ", Number of containers scanned in this iteration : {}" + - ", Number of unhealthy containers found in this iteration : {}", - TimeUnit.NANOSECONDS.toMinutes(totalDuration), - metrics.getNumScanIterations(), - metrics.getNumContainersScanned(), - metrics.getNumUnHealthyContainers()); - } - long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(totalDuration); - long remainingSleep = dataScanInterval - elapsedMillis; - if (remainingSleep > 0) { - try { - Thread.sleep(remainingSleep); - } catch (InterruptedException ignored) { - } - } - } - } - - public synchronized void shutdown() { - this.stopping = true; - this.canceler.cancel("ContainerDataScanner("+volume+") is shutting down"); - this.interrupt(); - try { - this.join(); - } catch (InterruptedException ex) { - LOG.warn("Unexpected exception while stopping data scanner for volume " - + volume, ex); - } - } - - @VisibleForTesting - public ContainerDataScrubberMetrics getMetrics() { - return metrics; - } - - @Override - public String toString() { - return "ContainerDataScanner(" + volume + - ", " + volume.getStorageID() + ")"; - } - - private class HddsDataTransferThrottler extends DataTransferThrottler { - HddsDataTransferThrottler(long bandwidthPerSec) { - super(bandwidthPerSec); - } - - @Override - public synchronized void throttle(long numOfBytes) { - ContainerDataScanner.this.metrics.incNumBytesScanned(numOfBytes); - super.throttle(numOfBytes); - } - - @Override - public synchronized void throttle(long numOfBytes, Canceler c) { - ContainerDataScanner.this.metrics.incNumBytesScanned(numOfBytes); - super.throttle(numOfBytes, c); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java deleted file mode 100644 index 3cf4f588322a6..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterInt; -import org.apache.hadoop.metrics2.lib.MutableGaugeInt; -import org.apache.hadoop.metrics2.lib.MutableRate; - -import java.util.concurrent.ThreadLocalRandom; - -/** - * This class captures the container data scrubber metrics on the data-node. - **/ -@InterfaceAudience.Private -@Metrics(about="DataNode container data scrubber metrics", context="dfs") -public final class ContainerDataScrubberMetrics { - private final String name; - private final MetricsSystem ms; - @Metric("number of containers scanned in the current iteration") - private MutableGaugeInt numContainersScanned; - @Metric("number of unhealthy containers found in the current iteration") - private MutableGaugeInt numUnHealthyContainers; - @Metric("number of iterations of scanner completed since the restart") - private MutableCounterInt numScanIterations; - @Metric("disk bandwidth used by the container data scrubber per volume") - private MutableRate numBytesScanned; - - public int getNumContainersScanned() { - return numContainersScanned.value(); - } - - public void incNumContainersScanned() { - numContainersScanned.incr(); - } - - public void resetNumContainersScanned() { - numContainersScanned.decr(getNumContainersScanned()); - } - - public int getNumUnHealthyContainers() { - return numUnHealthyContainers.value(); - } - - public void incNumUnHealthyContainers() { - numUnHealthyContainers.incr(); - } - - public void resetNumUnhealthyContainers() { - numUnHealthyContainers.decr(getNumUnHealthyContainers()); - } - - public int getNumScanIterations() { - return numScanIterations.value(); - } - - public void incNumScanIterations() { - numScanIterations.incr(); - } - - public double getNumBytesScannedMean() { - return numBytesScanned.lastStat().mean(); - } - - public long getNumBytesScannedSampleCount() { - return numBytesScanned.lastStat().numSamples(); - } - - public double getNumBytesScannedStdDev() { - return numBytesScanned.lastStat().stddev(); - } - - public void incNumBytesScanned(long bytes) { - numBytesScanned.add(bytes); - } - - public void unregister() { - ms.unregisterSource(name); - } - - private ContainerDataScrubberMetrics(String name, MetricsSystem ms) { - this.name = name; - this.ms = ms; - } - - public static ContainerDataScrubberMetrics create(final String volumeName) { - MetricsSystem ms = DefaultMetricsSystem.instance(); - String name = "ContainerDataScrubberMetrics-"+ (volumeName.isEmpty() - ? 
"UndefinedDataNodeVolume"+ ThreadLocalRandom.current().nextInt() - : volumeName.replace(':', '-')); - - return ms.register(name, null, new ContainerDataScrubberMetrics(name, ms)); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java deleted file mode 100644 index 46aaf73a12dd0..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Iterator; -import java.util.concurrent.TimeUnit; - -/** - * This class is responsible to perform metadata verification of the - * containers. - */ -public class ContainerMetadataScanner extends Thread { - public static final Logger LOG = - LoggerFactory.getLogger(ContainerMetadataScanner.class); - - private final ContainerController controller; - private final long metadataScanInterval; - private final ContainerMetadataScrubberMetrics metrics; - /** - * True if the thread is stopping.

- * Protected by this object's lock. - */ - private boolean stopping = false; - - public ContainerMetadataScanner(ContainerScrubberConfiguration conf, - ContainerController controller) { - this.controller = controller; - this.metadataScanInterval = conf.getMetadataScanInterval(); - this.metrics = ContainerMetadataScrubberMetrics.create(); - setName("ContainerMetadataScanner"); - setDaemon(true); - } - - @Override - public void run() { - /* - * the outer daemon loop exits on shutdown() - */ - LOG.info("Background ContainerMetadataScanner starting up"); - while (!stopping) { - runIteration(); - if(!stopping) { - metrics.resetNumUnhealthyContainers(); - metrics.resetNumContainersScanned(); - } - } - } - - @VisibleForTesting - void runIteration() { - long start = System.nanoTime(); - Iterator> containerIt = controller.getContainers(); - while (!stopping && containerIt.hasNext()) { - Container container = containerIt.next(); - try { - scrub(container); - } catch (IOException e) { - LOG.info("Unexpected error while scrubbing container {}", - container.getContainerData().getContainerID()); - } finally { - metrics.incNumContainersScanned(); - } - } - long interval = System.nanoTime()-start; - if (!stopping) { - metrics.incNumScanIterations(); - LOG.info("Completed an iteration of container metadata scrubber in" + - " {} minutes." + - " Number of iterations (since the data-node restart) : {}" + - ", Number of containers scanned in this iteration : {}" + - ", Number of unhealthy containers found in this iteration : {}", - TimeUnit.NANOSECONDS.toMinutes(interval), - metrics.getNumScanIterations(), - metrics.getNumContainersScanned(), - metrics.getNumUnHealthyContainers()); - // ensure to delay next metadata scan with respect to user config. - if (interval < metadataScanInterval) { - try { - Thread.sleep(metadataScanInterval - interval); - } catch (InterruptedException e) { - LOG.info("Background ContainerMetadataScanner interrupted." + - " Going to exit"); - } - } - } - } - - @VisibleForTesting - public void scrub(Container container) throws IOException { - if (!container.scanMetaData()) { - metrics.incNumUnHealthyContainers(); - controller.markContainerUnhealthy( - container.getContainerData().getContainerID()); - } - } - - @VisibleForTesting - public ContainerMetadataScrubberMetrics getMetrics() { - return metrics; - } - - public synchronized void shutdown() { - this.stopping = true; - this.interrupt(); - try { - this.join(); - } catch (InterruptedException ex) { - LOG.warn("Unexpected exception while stopping metadata scanner.", ex); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java deleted file mode 100644 index 3effc351b0051..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterInt; -import org.apache.hadoop.metrics2.lib.MutableGaugeInt; - -/** - * This class captures the container meta-data scrubber metrics on the - * data-node. - **/ -@InterfaceAudience.Private -@Metrics(about="DataNode container data scrubber metrics", context="dfs") -public final class ContainerMetadataScrubberMetrics { - private final String name; - private final MetricsSystem ms; - @Metric("number of containers scanned in the current iteration") - private MutableGaugeInt numContainersScanned; - @Metric("number of unhealthy containers found in the current iteration") - private MutableGaugeInt numUnHealthyContainers; - @Metric("number of iterations of scanner completed since the restart") - private MutableCounterInt numScanIterations; - - public int getNumContainersScanned() { - return numContainersScanned.value(); - } - - public void incNumContainersScanned() { - numContainersScanned.incr(); - } - - public void resetNumContainersScanned() { - numContainersScanned.decr(getNumContainersScanned()); - } - - public int getNumUnHealthyContainers() { - return numUnHealthyContainers.value(); - } - - public void incNumUnHealthyContainers() { - numUnHealthyContainers.incr(); - } - - public void resetNumUnhealthyContainers() { - numUnHealthyContainers.decr(getNumUnHealthyContainers()); - } - - public int getNumScanIterations() { - return numScanIterations.value(); - } - - public void incNumScanIterations() { - numScanIterations.incr(); - } - - public void unregister() { - ms.unregisterSource(name); - } - - private ContainerMetadataScrubberMetrics(String name, MetricsSystem ms) { - this.name = name; - this.ms = ms; - } - - public static ContainerMetadataScrubberMetrics create() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - String name = "ContainerMetadataScrubberMetrics"; - return ms.register(name, null, - new ContainerMetadataScrubberMetrics(name, ms)); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java deleted file mode 100644 index 621da70735df2..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
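Both scrubber metrics classes above reset their per-iteration gauges by decrementing them by their own current value instead of unregistering and re-registering the metric. The same idea, reduced to an AtomicInteger stand-in so it runs without the Hadoop metrics2 classes:

    import java.util.concurrent.atomic.AtomicInteger;

    // Resettable-gauge pattern: reset by subtracting the current value,
    // mirroring numContainersScanned.decr(getNumContainersScanned()).
    final class ResettableGaugeSketch {
      private final AtomicInteger containersScanned = new AtomicInteger();

      void incScanned() {
        containersScanned.incrementAndGet();
      }

      int getScanned() {
        return containersScanned.get();
      }

      void resetScanned() {
        containersScanned.addAndGet(-getScanned());
      }
    }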
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileFilter; -import java.io.IOException; -import java.util.List; - -/** - * Class used to read .container files from Volume and build container map. - * - * Layout of the container directory on disk is as follows: - * - *

../hdds/VERSION - *

{@literal ../hdds/<>/current/<>/</metadata/<>.container} - *

{@literal ../hdds/<>/current/<>/</<>} - *

- * Some ContainerTypes will have extra metadata other than the .container - * file. For example, KeyValueContainer will have a .db file. This .db file - * will also be stored in the metadata folder along with the .container file. - *

- * {@literal ../hdds/<>/current/<>/</metadata/<>.db} - *

- * Note that the {@literal <>} is dependent on the ContainerType. - * For KeyValueContainers, the data is stored in a "chunks" folder. As such, - * the {@literal <>} layout for KeyValueContainers is: - *

{@literal ../hdds/<>/current/<>/</chunks/<>} - * - */ -public class ContainerReader implements Runnable { - - private static final Logger LOG = LoggerFactory.getLogger( - ContainerReader.class); - private HddsVolume hddsVolume; - private final ContainerSet containerSet; - private final OzoneConfiguration config; - private final File hddsVolumeDir; - private final VolumeSet volumeSet; - - ContainerReader(VolumeSet volSet, HddsVolume volume, ContainerSet cset, - OzoneConfiguration conf) { - Preconditions.checkNotNull(volume); - this.hddsVolume = volume; - this.hddsVolumeDir = hddsVolume.getHddsRootDir(); - this.containerSet = cset; - this.config = conf; - this.volumeSet = volSet; - } - - @Override - public void run() { - try { - readVolume(hddsVolumeDir); - } catch (RuntimeException ex) { - LOG.error("Caught a Run time exception during reading container files" + - " from Volume {} {}", hddsVolumeDir, ex); - } - } - - public void readVolume(File hddsVolumeRootDir) { - Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" + - "cannot be null"); - - //filtering scm directory - File[] scmDir = hddsVolumeRootDir.listFiles(new FileFilter() { - @Override - public boolean accept(File pathname) { - return pathname.isDirectory(); - } - }); - - if (scmDir == null) { - LOG.error("IO error for the volume {}, skipped loading", - hddsVolumeRootDir); - volumeSet.failVolume(hddsVolumeRootDir.getPath()); - return; - } - - if (scmDir.length > 1) { - LOG.error("Volume {} is in Inconsistent state", hddsVolumeRootDir); - volumeSet.failVolume(hddsVolumeRootDir.getPath()); - return; - } - - for (File scmLoc : scmDir) { - File currentDir = new File(scmLoc, Storage.STORAGE_DIR_CURRENT); - File[] containerTopDirs = currentDir.listFiles(); - if (containerTopDirs != null) { - for (File containerTopDir : containerTopDirs) { - if (containerTopDir.isDirectory()) { - File[] containerDirs = containerTopDir.listFiles(); - if (containerDirs != null) { - for (File containerDir : containerDirs) { - File containerFile = ContainerUtils.getContainerFile( - containerDir); - long containerID = ContainerUtils.getContainerID(containerDir); - if (containerFile.exists()) { - verifyContainerFile(containerID, containerFile); - } else { - LOG.error("Missing .container file for ContainerID: {}", - containerDir.getName()); - } - } - } - } - } - } - } - } - - private void verifyContainerFile(long containerID, File containerFile) { - try { - ContainerData containerData = ContainerDataYaml.readContainerFile( - containerFile); - if (containerID != containerData.getContainerID()) { - LOG.error("Invalid ContainerID in file {}. " + - "Skipping loading of this container.", containerFile); - return; - } - verifyAndFixupContainerData(containerData); - } catch (IOException ex) { - LOG.error("Failed to parse ContainerFile for ContainerID: {}", - containerID, ex); - } - } - - /** - * verify ContainerData loaded from disk and fix-up stale members. 
- * Specifically blockCommitSequenceId, delete related metadata - * and bytesUsed - * @param containerData - * @throws IOException - */ - public void verifyAndFixupContainerData(ContainerData containerData) - throws IOException { - switch (containerData.getContainerType()) { - case KeyValueContainer: - if (containerData instanceof KeyValueContainerData) { - KeyValueContainerData kvContainerData = (KeyValueContainerData) - containerData; - containerData.setVolume(hddsVolume); - - KeyValueContainerUtil.parseKVContainerData(kvContainerData, config); - KeyValueContainer kvContainer = new KeyValueContainer( - kvContainerData, config); - try(ReferenceCountedDB containerDB = BlockUtils.getDB(kvContainerData, - config)) { - MetadataKeyFilters.KeyPrefixFilter filter = - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETING_KEY_PREFIX); - int numPendingDeletionBlocks = - containerDB.getStore().getSequentialRangeKVs(null, - Integer.MAX_VALUE, filter) - .size(); - kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks); - byte[] delTxnId = containerDB.getStore().get( - DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX)); - if (delTxnId != null) { - kvContainerData - .updateDeleteTransactionId(Longs.fromByteArray(delTxnId)); - } - // sets the BlockCommitSequenceId. - byte[] bcsId = containerDB.getStore().get(DFSUtil.string2Bytes( - OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX)); - if (bcsId != null) { - kvContainerData - .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId)); - } - if (kvContainer.getContainerState() - == ContainerProtos.ContainerDataProto.State.OPEN) { - // commitSpace for Open Containers relies on usedBytes - initializeUsedBytes(kvContainer); - } - containerSet.addContainer(kvContainer); - } - } else { - throw new StorageContainerException("Container File is corrupted. " + - "ContainerType is KeyValueContainer but cast to " + - "KeyValueContainerData failed. ", - ContainerProtos.Result.CONTAINER_METADATA_ERROR); - } - break; - default: - throw new StorageContainerException("Unrecognized ContainerType " + - containerData.getContainerType(), - ContainerProtos.Result.UNKNOWN_CONTAINER_TYPE); - } - } - - private void initializeUsedBytes(KeyValueContainer container) - throws IOException { - try (KeyValueBlockIterator blockIter = new KeyValueBlockIterator( - container.getContainerData().getContainerID(), - new File(container.getContainerData().getContainerPath()))) { - long usedBytes = 0; - - while (blockIter.hasNext()) { - BlockData block = blockIter.nextBlock(); - long blockLen = 0; - - List chunkInfoList = block.getChunks(); - for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { - ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk); - blockLen += info.getLen(); - } - - usedBytes += blockLen; - } - - container.getContainerData().setBytesUsed(usedBytes); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java deleted file mode 100644 index 454ce84310aaf..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
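For orientation, the directory layout described in the ContainerReader javadoc above can be walked with plain java.io. A rough sketch follows; the path components (scm UUID, container directory, container ID) are assumed names, since the literal placeholders were stripped from the listing above, and the real code goes through ContainerUtils and Storage.STORAGE_DIR_CURRENT rather than hard-coded strings. Needs java.io.File, java.util.ArrayList, java.util.List.

  // Sketch of the on-disk walk performed by ContainerReader.readVolume():
  // <volumeRoot>/<scmUuid>/current/<containerDir>/<containerID>/metadata/<containerID>.container
  static List<File> findContainerFiles(File hddsVolumeRoot) {
    List<File> result = new ArrayList<>();
    File[] scmDirs = hddsVolumeRoot.listFiles(File::isDirectory);
    if (scmDirs == null) {
      return result;                                  // IO error; the real code fails the volume
    }
    for (File scmDir : scmDirs) {
      File current = new File(scmDir, "current");     // Storage.STORAGE_DIR_CURRENT in the real code
      File[] containerTopDirs = current.listFiles(File::isDirectory);
      if (containerTopDirs == null) {
        continue;
      }
      for (File topDir : containerTopDirs) {
        File[] containerDirs = topDir.listFiles(File::isDirectory);
        if (containerDirs == null) {
          continue;
        }
        for (File containerDir : containerDirs) {
          File metadata = new File(containerDir, "metadata");
          File[] dotContainer =
              metadata.listFiles(f -> f.getName().endsWith(".container"));
          if (dotContainer != null && dotContainer.length > 0) {
            result.add(dotContainer[0]);              // missing file is logged as an error above
          }
        }
      }
    }
    return result;
  }
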
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigTag; -import org.apache.hadoop.hdds.conf.ConfigType; - -/** - * This class defines configuration parameters for container scrubber. - **/ -@ConfigGroup(prefix = "hdds.containerscrub") -public class ContainerScrubberConfiguration { - private boolean enabled; - private long metadataScanInterval; - private long dataScanInterval; - private long bandwidthPerVolume; - - @Config(key = "enabled", - type = ConfigType.BOOLEAN, - defaultValue = "false", - tags = {ConfigTag.STORAGE}, - description = "Config parameter to enable container scrubber.") - public void setEnabled(boolean enabled) { - this.enabled = enabled; - } - - public boolean isEnabled() { - return enabled; - } - - @Config(key = "metadata.scan.interval", - type = ConfigType.TIME, - defaultValue = "3h", - tags = {ConfigTag.STORAGE}, - description = "Config parameter define time interval in milliseconds" + - " between two metadata scans by container scrubber.") - public void setMetadataScanInterval(long metadataScanInterval) { - this.metadataScanInterval = metadataScanInterval; - } - - public long getMetadataScanInterval() { - return metadataScanInterval; - } - - @Config(key = "data.scan.interval", - type = ConfigType.TIME, - defaultValue = "1m", - tags = { ConfigTag.STORAGE }, - description = "Minimum time interval between two iterations of container" - + " data scanning. If an iteration takes less time than this, the" - + " scanner will wait before starting the next iteration." - ) - public void setDataScanInterval(long dataScanInterval) { - this.dataScanInterval = dataScanInterval; - } - - public long getDataScanInterval() { - return dataScanInterval; - } - - @Config(key = "volume.bytes.per.second", - type = ConfigType.LONG, - defaultValue = "1048576", - tags = {ConfigTag.STORAGE}, - description = "Config parameter to throttle I/O bandwidth used" - + " by scrubber per volume.") - public void setBandwidthPerVolume(long bandwidthPerVolume) { - this.bandwidthPerVolume = bandwidthPerVolume; - } - - public long getBandwidthPerVolume() { - return bandwidthPerVolume; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java deleted file mode 100644 index a026f0e8757b4..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
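The @Config annotations above presumably surface as hdds.containerscrub.* keys. A short sketch of how the group is materialized and consulted, mirroring the conf.getObject() call that appears later in OzoneContainer.startContainerScrub():

  // Sketch: materializing the typed config group removed above.
  static void configureScrubber(OzoneConfiguration conf) {
    ContainerScrubberConfiguration scrub =
        conf.getObject(ContainerScrubberConfiguration.class);
    if (!scrub.isEnabled()) {                          // presumably hdds.containerscrub.enabled, default false
      return;
    }
    long metadataIntervalMs = scrub.getMetadataScanInterval();  // default "3h", in milliseconds per the description
    long minDataScanGapMs = scrub.getDataScanInterval();        // default "1m"
    long bytesPerSecond = scrub.getBandwidthPerVolume();        // default 1048576
    // ... hand these to the metadata/data scanner threads
  }
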
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Maps; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto - .ContainerProtos.ContainerType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; - -import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService; -import org.apache.hadoop.ozone.container.replication.GrpcReplicationService; -import org.apache.hadoop.ozone.container.replication - .OnDemandContainerReplicationSource; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.*; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.*; - -/** - * Ozone main class sets up the network servers and initializes the container - * layer. - */ -public class OzoneContainer { - - private static final Logger LOG = LoggerFactory.getLogger( - OzoneContainer.class); - - private final HddsDispatcher hddsDispatcher; - private final Map handlers; - private final OzoneConfiguration config; - private final VolumeSet volumeSet; - private final ContainerSet containerSet; - private final XceiverServerSpi writeChannel; - private final XceiverServerSpi readChannel; - private final ContainerController controller; - private ContainerMetadataScanner metadataScanner; - private List dataScanners; - private final BlockDeletingService blockDeletingService; - - /** - * Construct OzoneContainer object. 
- * @param datanodeDetails - * @param conf - * @param certClient - * @throws DiskOutOfSpaceException - * @throws IOException - */ - public OzoneContainer(DatanodeDetails datanodeDetails, OzoneConfiguration - conf, StateContext context, CertificateClient certClient) - throws IOException { - this.config = conf; - this.volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf); - this.containerSet = new ContainerSet(); - this.metadataScanner = null; - - buildContainerSet(); - final ContainerMetrics metrics = ContainerMetrics.create(conf); - this.handlers = Maps.newHashMap(); - for (ContainerType containerType : ContainerType.values()) { - handlers.put(containerType, - Handler.getHandlerForContainerType( - containerType, conf, context, containerSet, volumeSet, metrics)); - } - this.hddsDispatcher = new HddsDispatcher(config, containerSet, volumeSet, - handlers, context, metrics); - - /* - * ContainerController is the control plane - * XceiverServerRatis is the write channel - * XceiverServerGrpc is the read channel - */ - this.controller = new ContainerController(containerSet, handlers); - this.writeChannel = XceiverServerRatis.newXceiverServerRatis( - datanodeDetails, config, hddsDispatcher, controller, certClient, - context); - this.readChannel = new XceiverServerGrpc( - datanodeDetails, config, hddsDispatcher, certClient, - createReplicationService()); - long svcInterval = config - .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - long serviceTimeout = config - .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, - OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - this.blockDeletingService = - new BlockDeletingService(this, svcInterval, serviceTimeout, - TimeUnit.MILLISECONDS, config); - } - - private GrpcReplicationService createReplicationService() { - return new GrpcReplicationService( - new OnDemandContainerReplicationSource(controller)); - } - - /** - * Build's container map. - */ - private void buildContainerSet() { - Iterator volumeSetIterator = volumeSet.getVolumesList() - .iterator(); - ArrayList volumeThreads = new ArrayList(); - - //TODO: diskchecker should be run before this, to see how disks are. - // And also handle disk failure tolerance need to be added - while (volumeSetIterator.hasNext()) { - HddsVolume volume = volumeSetIterator.next(); - Thread thread = new Thread(new ContainerReader(volumeSet, volume, - containerSet, config)); - thread.start(); - volumeThreads.add(thread); - } - - try { - for (int i = 0; i < volumeThreads.size(); i++) { - volumeThreads.get(i).join(); - } - } catch (InterruptedException ex) { - LOG.info("Volume Threads Interrupted exception", ex); - } - - } - - - /** - * Start background daemon thread for performing container integrity checks. - */ - private void startContainerScrub() { - ContainerScrubberConfiguration c = config.getObject( - ContainerScrubberConfiguration.class); - boolean enabled = c.isEnabled(); - - if (!enabled) { - LOG.info("Background container scanner has been disabled."); - } else { - if (this.metadataScanner == null) { - this.metadataScanner = new ContainerMetadataScanner(c, controller); - } - this.metadataScanner.start(); - - dataScanners = new ArrayList<>(); - for (HddsVolume v : volumeSet.getVolumesList()) { - ContainerDataScanner s = new ContainerDataScanner(c, controller, v); - s.start(); - dataScanners.add(s); - } - } - } - - /** - * Stop the scanner thread and wait for thread to die. 
- */ - private void stopContainerScrub() { - if (metadataScanner == null) { - return; - } - metadataScanner.shutdown(); - metadataScanner = null; - for (ContainerDataScanner s : dataScanners) { - s.shutdown(); - } - } - - /** - * Starts serving requests to ozone container. - * - * @throws IOException - */ - public void start(String scmId) throws IOException { - LOG.info("Attempting to start container services."); - startContainerScrub(); - writeChannel.start(); - readChannel.start(); - hddsDispatcher.init(); - hddsDispatcher.setScmId(scmId); - blockDeletingService.start(); - } - - /** - * Stop Container Service on the datanode. - */ - public void stop() { - //TODO: at end of container IO integration work. - LOG.info("Attempting to stop container services."); - stopContainerScrub(); - writeChannel.stop(); - readChannel.stop(); - this.handlers.values().forEach(Handler::stop); - hddsDispatcher.shutdown(); - volumeSet.shutdown(); - blockDeletingService.shutdown(); - ContainerMetrics.remove(); - } - - - @VisibleForTesting - public ContainerSet getContainerSet() { - return containerSet; - } - /** - * Returns container report. - * @return - container report. - */ - - public PipelineReportsProto getPipelineReport() { - PipelineReportsProto.Builder pipelineReportsProto = - PipelineReportsProto.newBuilder(); - pipelineReportsProto.addAllPipelineReport(writeChannel.getPipelineReport()); - return pipelineReportsProto.build(); - } - - public XceiverServerSpi getWriteChannel() { - return writeChannel; - } - - public XceiverServerSpi getReadChannel() { - return readChannel; - } - - public ContainerController getController() { - return controller; - } - - /** - * Returns node report of container storage usage. - */ - public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() - throws IOException { - return volumeSet.getNodeReport(); - } - - @VisibleForTesting - public ContainerDispatcher getDispatcher() { - return this.hddsDispatcher; - } - - public VolumeSet getVolumeSet() { - return volumeSet; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java deleted file mode 100644 index c99c038b244e9..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
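Taken together, OzoneContainer above has a small external lifecycle. A hedged sketch of the calling sequence, using only the public methods shown in this diff (the caller and its arguments are placeholders, and the types are the ones being removed here):

  // Sketch of the start/stop sequence around the container layer.
  static void runContainerLayer(DatanodeDetails dn, OzoneConfiguration conf,
      StateContext context, CertificateClient certClient, String scmId)
      throws IOException {
    OzoneContainer ozoneContainer =
        new OzoneContainer(dn, conf, context, certClient);
    ozoneContainer.start(scmId);    // scrubber, Ratis write channel, gRPC read channel, dispatcher
    try {
      // ... serve requests; reports are pulled via getNodeReport()/getPipelineReport()
    } finally {
      ozoneContainer.stop();
    }
  }
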
- */ -package org.apache.hadoop.ozone.container.ozoneimpl; -/** - Ozone main that calls into the container layer -**/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerDownloader.java deleted file mode 100644 index 9511241fb5f2d..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerDownloader.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.io.Closeable; -import java.nio.file.Path; -import java.util.List; -import java.util.concurrent.CompletableFuture; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -/** - * Service to download container data from other datanodes. - *

- * The implementation of this interface should copy the raw container data in - * compressed form to working directory. - *

- * A smart implementation would use multiple sources to do parallel download. - */ -public interface ContainerDownloader extends Closeable { - - CompletableFuture getContainerDataFromReplicas(long containerId, - List sources); - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicationSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicationSource.java deleted file mode 100644 index 69582f799f8b4..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicationSource.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.io.IOException; -import java.io.OutputStream; - -/** - * Contract to prepare provide the container in binary form.. - *

- * Prepare will be called when container is closed. An implementation could - * precache any binary representation of a container and store the pre packede - * images. - */ -public interface ContainerReplicationSource { - - /** - * Prepare for the replication. - * - * @param containerId The name of the container the package. - */ - void prepare(long containerId); - - /** - * Copy the container data to an output stream. - * - * @param containerId Container to replicate - * @param destination The destination stream to copy all the container data. - * @throws IOException - */ - void copyData(long containerId, OutputStream destination) - throws IOException; - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java deleted file mode 100644 index 827b9d69e8b19..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -/** - * Service to do the real replication task. - * - * An implementation should download the container and im - */ -public interface ContainerReplicator { - void replicate(ReplicationTask task); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerStreamingOutput.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerStreamingOutput.java deleted file mode 100644 index f7fd8a4957dc7..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerStreamingOutput.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.StreamingOutput; -import java.io.IOException; -import java.io.OutputStream; - -/** - * JAX-RS streaming output to return the binary container data. - */ -public class ContainerStreamingOutput implements StreamingOutput { - - private long containerId; - - private ContainerReplicationSource containerReplicationSource; - - public ContainerStreamingOutput(long containerId, - ContainerReplicationSource containerReplicationSource) { - this.containerId = containerId; - this.containerReplicationSource = containerReplicationSource; - } - - @Override - public void write(OutputStream outputStream) - throws IOException, WebApplicationException { - containerReplicationSource.copyData(containerId, outputStream); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java deleted file mode 100644 index eef01a13f2f4b..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.io.FileInputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.List; -import java.util.concurrent.CompletableFuture; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Default replication implementation. - *

- * This class does the real job. Executes the download and import the container - * to the container set. - */ -public class DownloadAndImportReplicator implements ContainerReplicator { - - private static final Logger LOG = - LoggerFactory.getLogger(DownloadAndImportReplicator.class); - - private final ContainerSet containerSet; - - private final ContainerController controller; - - private final ContainerDownloader downloader; - - private final TarContainerPacker packer; - - public DownloadAndImportReplicator( - ContainerSet containerSet, - ContainerController controller, - ContainerDownloader downloader, - TarContainerPacker packer) { - this.containerSet = containerSet; - this.controller = controller; - this.downloader = downloader; - this.packer = packer; - } - - public void importContainer(long containerID, Path tarFilePath) { - try { - ContainerData originalContainerData; - try (FileInputStream tempContainerTarStream = new FileInputStream( - tarFilePath.toFile())) { - byte[] containerDescriptorYaml = - packer.unpackContainerDescriptor(tempContainerTarStream); - originalContainerData = ContainerDataYaml.readContainer( - containerDescriptorYaml); - } - - try (FileInputStream tempContainerTarStream = new FileInputStream( - tarFilePath.toFile())) { - - Container container = controller.importContainer( - originalContainerData.getContainerType(), - containerID, - originalContainerData.getMaxSize(), - originalContainerData.getOriginPipelineId(), - originalContainerData.getOriginNodeId(), - tempContainerTarStream, - packer); - - containerSet.addContainer(container); - } - - } catch (Exception e) { - LOG.error( - "Can't import the downloaded container data id=" + containerID, - e); - } finally { - try { - Files.delete(tarFilePath); - } catch (Exception ex) { - LOG.error("Got exception while deleting downloaded container file: " - + tarFilePath.toAbsolutePath().toString(), ex); - } - } - } - - @Override - public void replicate(ReplicationTask task) { - long containerID = task.getContainerId(); - - List sourceDatanodes = task.getSources(); - - LOG.info("Starting replication of container {} from {}", containerID, - sourceDatanodes); - - CompletableFuture tempTarFile = downloader - .getContainerDataFromReplicas(containerID, - sourceDatanodes); - - try { - //wait for the download. This thread pool is limiting the paralell - //downloads, so it's ok to block here and wait for the full download. - Path path = tempTarFile.get(); - LOG.info("Container {} is downloaded, starting to import.", - containerID); - importContainer(containerID, path); - LOG.info("Container {} is replicated successfully", containerID); - task.setStatus(Status.DONE); - } catch (Exception e) { - LOG.error("Container replication was unsuccessful .", e); - task.setStatus(Status.FAILED); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java deleted file mode 100644 index 8494a15274424..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
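The replicate() path above is strictly download-then-import. A sketch of how the pieces shown in this diff can be wired together for a single task; this is illustrative only, not necessarily how the datanode daemon assembles them:

  // Sketch: wiring a replicator and running one task.
  static void replicateOnce(ContainerSet containerSet,
      ContainerController controller, OzoneConfiguration conf,
      long containerId, List<DatanodeDetails> sources) {
    ContainerReplicator replicator = new DownloadAndImportReplicator(
        containerSet,
        controller,
        new SimpleContainerDownloader(conf),   // tries the sources one by one
        new TarContainerPacker());             // unpacks the streamed container archive
    replicator.replicate(new ReplicationTask(containerId, sources));
  }
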
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.replication; - -import java.io.BufferedOutputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .CopyContainerRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .CopyContainerResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto - .IntraDatanodeProtocolServiceGrpc; -import org.apache.hadoop.hdds.protocol.datanode.proto - .IntraDatanodeProtocolServiceGrpc.IntraDatanodeProtocolServiceStub; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.ratis.thirdparty.io.grpc.ManagedChannel; -import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder; -import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Client to read container data from Grpc. - */ -public class GrpcReplicationClient { - - private static final Logger LOG = - LoggerFactory.getLogger(GrpcReplicationClient.class); - - private final ManagedChannel channel; - - private final IntraDatanodeProtocolServiceStub client; - - private final Path workingDirectory; - - public GrpcReplicationClient(String host, - int port, Path workingDir) { - - channel = NettyChannelBuilder.forAddress(host, port) - .usePlaintext() - .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) - .build(); - client = IntraDatanodeProtocolServiceGrpc.newStub(channel); - this.workingDirectory = workingDir; - - } - - public CompletableFuture download(long containerId) { - CopyContainerRequestProto request = - CopyContainerRequestProto.newBuilder() - .setContainerID(containerId) - .setLen(-1) - .setReadOffset(0) - .build(); - - CompletableFuture response = new CompletableFuture<>(); - - Path destinationPath = - getWorkingDirectory().resolve("container-" + containerId + ".tar.gz"); - - client.download(request, - new StreamDownloader(containerId, response, destinationPath)); - return response; - } - - private Path getWorkingDirectory() { - return workingDirectory; - } - - public void shutdown() { - channel.shutdown(); - try { - channel.awaitTermination(5, TimeUnit.SECONDS); - } catch (Exception e) { - LOG.error("failed to shutdown replication channel", e); - } - } - - /** - * Grpc stream observer to ComletableFuture adapter. 
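A usage sketch for the client above, assuming the returned future is typed to the local Path that StreamDownloader (just below) completes it with; host, port, working directory and timeout are placeholders:

  // Sketch: pulling one container tarball from a remote datanode.
  static Path fetchContainer(long containerId) throws Exception {
    GrpcReplicationClient client = new GrpcReplicationClient(
        "dn-host.example", 9859,                 // placeholder host and port
        Paths.get("/tmp/container-copy"));       // placeholder working directory
    try {
      // completes with the path of container-<id>.tar.gz once the stream is closed
      return client.download(containerId).get(5, TimeUnit.MINUTES);
    } finally {
      client.shutdown();
    }
  }
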
- */ - public static class StreamDownloader - implements StreamObserver { - - private final CompletableFuture response; - - private final long containerId; - - private BufferedOutputStream stream; - - private Path outputPath; - - public StreamDownloader(long containerId, CompletableFuture response, - Path outputPath) { - this.response = response; - this.containerId = containerId; - this.outputPath = outputPath; - try { - Preconditions.checkNotNull(outputPath, "Output path cannot be null"); - Path parentPath = Preconditions.checkNotNull(outputPath.getParent()); - Files.createDirectories(parentPath); - stream = - new BufferedOutputStream(new FileOutputStream(outputPath.toFile())); - } catch (IOException e) { - throw new RuntimeException("OutputPath can't be used: " + outputPath, - e); - } - - } - - @Override - public void onNext(CopyContainerResponseProto chunk) { - try { - stream.write(chunk.getData().toByteArray()); - } catch (IOException e) { - response.completeExceptionally(e); - } - } - - @Override - public void onError(Throwable throwable) { - try { - stream.close(); - LOG.error("Container download was unsuccessfull", throwable); - try { - Files.delete(outputPath); - } catch (IOException ex) { - LOG.error( - "Error happened during the download but can't delete the " - + "temporary destination.", ex); - } - response.completeExceptionally(throwable); - } catch (IOException e) { - response.completeExceptionally(e); - } - } - - @Override - public void onCompleted() { - try { - stream.close(); - LOG.info("Container is downloaded to {}", outputPath); - response.complete(outputPath); - } catch (IOException e) { - response.completeExceptionally(e); - } - - } - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java deleted file mode 100644 index 7919e54953140..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.replication; - -import java.io.ByteArrayOutputStream; -import java.io.Closeable; -import java.io.IOException; -import java.io.OutputStream; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .CopyContainerRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .CopyContainerResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto - .IntraDatanodeProtocolServiceGrpc; - -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Service to make containers available for replication. - */ -public class GrpcReplicationService extends - IntraDatanodeProtocolServiceGrpc.IntraDatanodeProtocolServiceImplBase { - - private static final Logger LOG = - LoggerFactory.getLogger(GrpcReplicationService.class); - - private final ContainerReplicationSource containerReplicationSource; - - public GrpcReplicationService( - ContainerReplicationSource containerReplicationSource) { - this.containerReplicationSource = containerReplicationSource; - } - - @Override - public void download(CopyContainerRequestProto request, - StreamObserver responseObserver) { - LOG.info("Streaming container data ({}) to other datanode", - request.getContainerID()); - try { - GrpcOutputStream outputStream = - new GrpcOutputStream(responseObserver, request.getContainerID()); - containerReplicationSource - .copyData(request.getContainerID(), outputStream); - } catch (IOException e) { - LOG.error("Can't stream the container data", e); - responseObserver.onError(e); - } - } - - private static class GrpcOutputStream extends OutputStream - implements Closeable { - - private static final int BUFFER_SIZE_IN_BYTES = 1024 * 1024; - - private final StreamObserver responseObserver; - - private final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); - - private long containerId; - - private int readOffset = 0; - - private int writtenBytes; - - GrpcOutputStream( - StreamObserver responseObserver, - long containerId) { - this.responseObserver = responseObserver; - this.containerId = containerId; - } - - @Override - public void write(int b) throws IOException { - try { - buffer.write(b); - if (buffer.size() > BUFFER_SIZE_IN_BYTES) { - flushBuffer(false); - } - } catch (Exception ex) { - responseObserver.onError(ex); - } - } - - private void flushBuffer(boolean eof) { - if (buffer.size() > 0) { - CopyContainerResponseProto response = - CopyContainerResponseProto.newBuilder() - .setContainerID(containerId) - .setData(ByteString.copyFrom(buffer.toByteArray())) - .setEof(eof) - .setReadOffset(readOffset) - .setLen(buffer.size()) - .build(); - responseObserver.onNext(response); - readOffset += buffer.size(); - writtenBytes += buffer.size(); - buffer.reset(); - } - } - - @Override - public void close() throws IOException { - flushBuffer(true); - LOG.info("{} bytes written to the rpc stream from container {}", - writtenBytes, containerId); - responseObserver.onCompleted(); - } - } -} diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java deleted file mode 100644 index d318ffa257f7d..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.io.IOException; -import java.io.OutputStream; - -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A naive implementation of the replication source which creates a tar file - * on-demand without pre-create the compressed archives. - */ -public class OnDemandContainerReplicationSource - implements ContainerReplicationSource { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerReplicationSource.class); - - private ContainerController controller; - - private TarContainerPacker packer = new TarContainerPacker(); - - public OnDemandContainerReplicationSource( - ContainerController controller) { - this.controller = controller; - } - - @Override - public void prepare(long containerId) { - - } - - @Override - public void copyData(long containerId, OutputStream destination) - throws IOException { - - Container container = controller.getContainer(containerId); - - Preconditions.checkNotNull( - container, "Container is not found " + containerId); - - controller.exportContainer( - container.getContainerType(), containerId, destination, packer); - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java deleted file mode 100644 index 7a07c4df71eb5..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentHashMap.KeySetView; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status; - -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Single point to schedule the downloading tasks based on priorities. - */ -public class ReplicationSupervisor { - - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationSupervisor.class); - - private final ContainerSet containerSet; - private final ContainerReplicator replicator; - private final ThreadPoolExecutor executor; - private final AtomicLong replicationCounter; - - /** - * A set of container IDs that are currently being downloaded - * or queued for download. Tracked so we don't schedule > 1 - * concurrent download for the same container. - */ - private final KeySetView containersInFlight; - - public ReplicationSupervisor( - ContainerSet containerSet, - ContainerReplicator replicator, int poolSize) { - this.containerSet = containerSet; - this.replicator = replicator; - this.containersInFlight = ConcurrentHashMap.newKeySet(); - replicationCounter = new AtomicLong(); - this.executor = new ThreadPoolExecutor( - 0, poolSize, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue<>(), - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("ContainerReplicationThread-%d") - .build()); - } - - /** - * Queue an asynchronous download of the given container. - */ - public void addTask(ReplicationTask task) { - if (containersInFlight.add(task.getContainerId())) { - executor.submit(new TaskRunner(task)); - } - } - - public void stop() { - try { - executor.shutdown(); - if (!executor.awaitTermination(3, TimeUnit.SECONDS)) { - executor.shutdownNow(); - } - } catch (InterruptedException ie) { - // Ignore, we don't really care about the failure. - Thread.currentThread().interrupt(); - } - } - - /** - * Get the number of containers currently being downloaded - * or scheduled for download. - * @return Count of in-flight replications. 
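A sketch of feeding the supervisor above; the pool size and container ID are placeholders, and the duplicate addTask() is dropped by the containersInFlight set while the first task is still queued or downloading:

  // Sketch: queueing downloads through the supervisor.
  static void queueDownloads(ContainerSet containerSet,
      ContainerReplicator replicator, List<DatanodeDetails> sources) {
    ReplicationSupervisor supervisor =
        new ReplicationSupervisor(containerSet, replicator, 10);  // placeholder pool size
    supervisor.addTask(new ReplicationTask(42L, sources));
    supervisor.addTask(new ReplicationTask(42L, sources));  // no-op while the first is in flight
    // ... later, on shutdown
    supervisor.stop();
  }
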
- */ - @VisibleForTesting - public int getInFlightReplications() { - return containersInFlight.size(); - } - - private final class TaskRunner implements Runnable { - private final ReplicationTask task; - - private TaskRunner(ReplicationTask task) { - this.task = task; - } - - @Override - public void run() { - try { - if (containerSet.getContainer(task.getContainerId()) != null) { - LOG.debug("Container {} has already been downloaded.", - task.getContainerId()); - return; - } - - task.setStatus(Status.DOWNLOADING); - replicator.replicate(task); - - if (task.getStatus() == Status.FAILED) { - LOG.error( - "Container {} can't be downloaded from any of the datanodes.", - task.getContainerId()); - } else if (task.getStatus() == Status.DONE) { - LOG.info("Container {} is replicated.", task.getContainerId()); - } - } finally { - containersInFlight.remove(task.getContainerId()); - replicationCounter.incrementAndGet(); - } - } - } - - public long getReplicationCounter() { - return replicationCounter.get(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java deleted file mode 100644 index 90198110b59b3..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.time.Instant; -import java.util.List; -import java.util.Objects; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -/** - * The task to download a container from the sources. - */ -public class ReplicationTask { - - private volatile Status status = Status.QUEUED; - - private final long containerId; - - private List sources; - - private final Instant queued = Instant.now(); - - public ReplicationTask(long containerId, - List sources) { - this.containerId = containerId; - this.sources = sources; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ReplicationTask that = (ReplicationTask) o; - return containerId == that.containerId; - } - - @Override - public int hashCode() { - return Objects.hash(containerId); - } - - public long getContainerId() { - return containerId; - } - - public List getSources() { - return sources; - } - - public Status getStatus() { - return status; - } - - public void setStatus( - Status status) { - this.status = status; - } - - @Override - public String toString() { - return "ReplicationTask{" + - "status=" + status + - ", containerId=" + containerId + - ", sources=" + sources + - ", queued=" + queued + - '}'; - } - - public Instant getQueued() { - return queued; - } - - /** - * Status of the replication. - */ - public enum Status { - QUEUED, - DOWNLOADING, - FAILED, - DONE - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java deleted file mode 100644 index 37a44acf74c9f..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
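One detail of ReplicationTask above worth keeping in mind: equals() and hashCode() use only the container ID, so two tasks for the same container compare equal even with different source lists. A tiny sketch (datanode arguments are placeholders, needs java.util.Collections):

  // Task identity is the container ID alone.
  static void taskIdentityExample(DatanodeDetails dn1, DatanodeDetails dn2) {
    ReplicationTask a = new ReplicationTask(7L, Collections.singletonList(dn1));
    ReplicationTask b = new ReplicationTask(7L, Collections.singletonList(dn2));
    assert a.equals(b) && a.hashCode() == b.hashCode();
    assert a.getStatus() == ReplicationTask.Status.QUEUED;   // initial state
  }
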
- */ - -package org.apache.hadoop.ozone.container.replication; - -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.function.Function; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; -import org.apache.hadoop.ozone.OzoneConfigKeys; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Simple ContainerDownloaderImplementation to download the missing container - * from the first available datanode. - *

- * This is not the most effective implementation as it uses only one source - * for he container download. - */ -public class SimpleContainerDownloader implements ContainerDownloader { - - private static final Logger LOG = - LoggerFactory.getLogger(SimpleContainerDownloader.class); - - private final Path workingDirectory; - - public SimpleContainerDownloader(Configuration conf) { - - String workDirString = - conf.get(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR); - - if (workDirString == null) { - workingDirectory = Paths.get(System.getProperty("java.io.tmpdir")) - .resolve("container-copy"); - } else { - workingDirectory = Paths.get(workDirString); - } - } - - @Override - public CompletableFuture getContainerDataFromReplicas(long containerId, - List sourceDatanodes) { - - CompletableFuture result = null; - for (DatanodeDetails datanode : sourceDatanodes) { - try { - - if (result == null) { - GrpcReplicationClient grpcReplicationClient = - new GrpcReplicationClient(datanode.getIpAddress(), - datanode.getPort(Name.STANDALONE).getValue(), - workingDirectory); - result = grpcReplicationClient.download(containerId); - } else { - result = result.thenApply(CompletableFuture::completedFuture) - .exceptionally(t -> { - LOG.error("Error on replicating container: " + containerId, t); - GrpcReplicationClient grpcReplicationClient = - new GrpcReplicationClient(datanode.getIpAddress(), - datanode.getPort(Name.STANDALONE).getValue(), - workingDirectory); - return grpcReplicationClient.download(containerId); - }).thenCompose(Function.identity()); - } - } catch (Exception ex) { - LOG.error(String.format( - "Container %s download from datanode %s was unsuccessful. " - + "Trying the next datanode", containerId, datanode), ex); - } - - } - return result; - - } - - @Override - public void close() { - // noop - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java deleted file mode 100644 index 38a853c72a040..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; -/** - Classes to replicate container data between datanodes. 
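The per-source loop in getContainerDataFromReplicas() above chains one download per datanode so that a failure falls through to the next source. Reduced to plain CompletableFutures, with no Hadoop types, the shape of that chain is roughly:

  // Generic form of the fallback chain: try the sources in order, moving on
  // only when the previous attempt failed. Needs java.util.List,
  // java.util.concurrent.CompletableFuture, java.util.function.Function,
  // java.util.function.Supplier.
  static <T> CompletableFuture<T> firstSuccessful(
      List<Supplier<CompletableFuture<T>>> attempts) {
    CompletableFuture<T> result = null;                      // null if no sources, like the original
    for (Supplier<CompletableFuture<T>> attempt : attempts) {
      if (result == null) {
        result = attempt.get();                              // first source
      } else {
        result = result
            .thenApply(CompletableFuture::completedFuture)   // success: wrap and keep it
            .exceptionally(t -> attempt.get())               // failure: start the next source
            .thenCompose(Function.identity());               // flatten back to CompletableFuture<T>
      }
    }
    return result;
  }
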
-**/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index 1a510128398ff..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -/** - * Generic ozone specific classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java deleted file mode 100644 index 61bdb27f4cdc8..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.ozone.protocol; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; - -import java.io.IOException; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.security.KerberosInfo; - -/** - * The protocol spoken between datanodes and SCM. For specifics please the - * Protoc file that defines this protocol. - */ -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -@InterfaceAudience.Private -public interface StorageContainerDatanodeProtocol { - - @SuppressWarnings("checkstyle:ConstantName") - /** - * Version 1: Initial version. - */ - long versionID = 1L; - - /** - * Returns SCM version. - * @return Version info. - */ - SCMVersionResponseProto getVersion(SCMVersionRequestProto versionRequest) - throws IOException; - - /** - * Used by data node to send a Heartbeat. - * @param heartbeat Heartbeat - * @return - SCMHeartbeatResponseProto - * @throws IOException - */ - SCMHeartbeatResponseProto sendHeartbeat(SCMHeartbeatRequestProto heartbeat) - throws IOException; - - /** - * Register Datanode. - * @param datanodeDetails - Datanode Details. - * @param nodeReport - Node Report. - * @param containerReportsRequestProto - Container Reports. - * @return SCM Command. - */ - SCMRegisteredResponseProto register( - DatanodeDetailsProto datanodeDetails, - NodeReportProto nodeReport, - ContainerReportsProto containerReportsRequestProto, - PipelineReportsProto pipelineReports) throws IOException; - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java deleted file mode 100644 index b5d75ef01cb28..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import java.util.List; - -/** - * The protocol spoken between datanodes and SCM. - * - * Please note that the full protocol spoken between a datanode and SCM is - * separated into 2 interfaces. One interface that deals with node state and - * another interface that deals with containers. - * - * This interface has functions that deals with the state of datanode. - */ -@InterfaceAudience.Private -public interface StorageContainerNodeProtocol { - /** - * Gets the version info from SCM. - * @param versionRequest - version Request. - * @return - returns SCM version info and other required information needed - * by datanode. - */ - VersionResponse getVersion(SCMVersionRequestProto versionRequest); - - /** - * Register the node if the node finds that it is not registered with any SCM. - * @param datanodeDetails DatanodeDetails - * @param nodeReport NodeReportProto - * @param pipelineReport PipelineReportsProto - * @return SCMHeartbeatResponseProto - */ - RegisteredCommand register(DatanodeDetails datanodeDetails, - NodeReportProto nodeReport, - PipelineReportsProto pipelineReport); - - /** - * Send heartbeat to indicate the datanode is alive and doing well. - * @param datanodeDetails - Datanode ID. - * @return SCMheartbeat response list - */ - List processHeartbeat(DatanodeDetails datanodeDetails); - - /** - * Check if node is registered or not. - * Return true if Node is registered and false otherwise. - * @param datanodeDetails - Datanode ID. - * @return true if Node is registered, false otherwise - */ - Boolean isNodeRegistered(DatanodeDetails datanodeDetails); - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java deleted file mode 100644 index 4d328d3d1e6c1..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java +++ /dev/null @@ -1,154 +0,0 @@ - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.protocol; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; - -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * Version response class. - */ -public class VersionResponse { - private final int version; - private final Map values; - - /** - * Creates a version response class. - * @param version - * @param values - */ - public VersionResponse(int version, Map values) { - this.version = version; - this.values = values; - } - - /** - * Creates a version Response class. - * @param version - */ - public VersionResponse(int version) { - this.version = version; - this.values = new HashMap<>(); - } - - /** - * Returns a new Builder. - * @return - Builder. - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Returns this class from protobuf message. - * @param response - SCMVersionResponseProto - * @return VersionResponse - */ - public static VersionResponse getFromProtobuf(SCMVersionResponseProto - response) { - return new VersionResponse(response.getSoftwareVersion(), - response.getKeysList().stream() - .collect(Collectors.toMap(KeyValue::getKey, - KeyValue::getValue))); - } - - /** - * Adds a value to version Response. - * @param key - String - * @param value - String - */ - public void put(String key, String value) { - if (this.values.containsKey(key)) { - throw new IllegalArgumentException("Duplicate key in version response"); - } - values.put(key, value); - } - - /** - * Return a protobuf message. - * @return SCMVersionResponseProto. - */ - public SCMVersionResponseProto getProtobufMessage() { - - List list = new LinkedList<>(); - for (Map.Entry entry : values.entrySet()) { - list.add(KeyValue.newBuilder().setKey(entry.getKey()). - setValue(entry.getValue()).build()); - } - return - SCMVersionResponseProto.newBuilder() - .setSoftwareVersion(this.version) - .addAllKeys(list).build(); - } - - public String getValue(String key) { - return this.values.get(key); - } - - /** - * Builder class. - */ - public static class Builder { - private int version; - private Map values; - - Builder() { - values = new HashMap<>(); - } - - /** - * Sets the version. - * @param ver - version - * @return Builder - */ - public Builder setVersion(int ver) { - this.version = ver; - return this; - } - - /** - * Adds a value to version Response. - * @param key - String - * @param value - String - */ - public Builder addValue(String key, String value) { - if (this.values.containsKey(key)) { - throw new IllegalArgumentException("Duplicate key in version response"); - } - values.put(key, value); - return this; - } - - /** - * Builds the version response. - * @return VersionResponse. 
- */ - public VersionResponse build() { - return new VersionResponse(this.version, this.values); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java deleted file mode 100644 index ded0464ef4bab..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; - -/** - * Asks datanode to close a container. - */ -public class CloseContainerCommand - extends SCMCommand { - - private final PipelineID pipelineID; - private boolean force; - - public CloseContainerCommand(final long containerID, - final PipelineID pipelineID) { - this(containerID, pipelineID, false); - } - - public CloseContainerCommand(final long containerID, - final PipelineID pipelineID, boolean force) { - super(containerID); - this.pipelineID = pipelineID; - this.force = force; - } - - /** - * Returns the type of this command. - * - * @return Type - */ - @Override - public SCMCommandProto.Type getType() { - return SCMCommandProto.Type.closeContainerCommand; - } - - @Override - public CloseContainerCommandProto getProto() { - return CloseContainerCommandProto.newBuilder() - .setContainerID(getId()) - .setCmdId(getId()) - .setPipelineID(pipelineID.getProtobuf()) - .setForce(force) - .build(); - } - - public static CloseContainerCommand getFromProtobuf( - CloseContainerCommandProto closeContainerProto) { - Preconditions.checkNotNull(closeContainerProto); - return new CloseContainerCommand(closeContainerProto.getCmdId(), - PipelineID.getFromProtobuf(closeContainerProto.getPipelineID()), - closeContainerProto.getForce()); - } - - public long getContainerID() { - return getId(); - } - - public PipelineID getPipelineID() { - return pipelineID; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java deleted file mode 100644 index 66bf623093651..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import java.util.UUID; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload; - -/** - * Command for the datanode with the destination address. - */ -public class CommandForDatanode implements - IdentifiableEventPayload { - - private final UUID datanodeId; - - private final SCMCommand command; - - // TODO: Command for datanode should take DatanodeDetails as parameter. - public CommandForDatanode(UUID datanodeId, SCMCommand command) { - this.datanodeId = datanodeId; - this.command = command; - } - - public UUID getDatanodeId() { - return datanodeId; - } - - public SCMCommand getCommand() { - return command; - } - - public long getId() { - return command.getId(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java deleted file mode 100644 index 4b3ce840dceb9..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; - -/** - * A class that is used to communicate status of datanode commands. - */ -public class CommandStatus { - - private SCMCommandProto.Type type; - private Long cmdId; - private Status status; - private String msg; - - CommandStatus(Type type, Long cmdId, Status status, String msg) { - this.type = type; - this.cmdId = cmdId; - this.status = status; - this.msg = msg; - } - - public Type getType() { - return type; - } - - public Long getCmdId() { - return cmdId; - } - - public Status getStatus() { - return status; - } - - public String getMsg() { - return msg; - } - - /** - * To allow change of status once commandStatus is initialized. - * - * @param status - */ - public void setStatus(Status status) { - this.status = status; - } - - public void setStatus(boolean cmdExecuted) { - setStatus(cmdExecuted ? Status.EXECUTED : Status.FAILED); - } - - /** - * Returns a CommandStatus from the protocol buffers. - * - * @param cmdStatusProto - protoBuf Message - * @return CommandStatus - */ - public CommandStatus getFromProtoBuf( - StorageContainerDatanodeProtocolProtos.CommandStatus cmdStatusProto) { - return CommandStatusBuilder.newBuilder() - .setCmdId(cmdStatusProto.getCmdId()) - .setStatus(cmdStatusProto.getStatus()) - .setType(cmdStatusProto.getType()) - .setMsg(cmdStatusProto.getMsg()) - .build(); - } - /** - * Returns a CommandStatus from the protocol buffers. - * - * @return StorageContainerDatanodeProtocolProtos.CommandStatus - */ - public StorageContainerDatanodeProtocolProtos.CommandStatus - getProtoBufMessage() { - StorageContainerDatanodeProtocolProtos.CommandStatus.Builder builder = - StorageContainerDatanodeProtocolProtos.CommandStatus.newBuilder() - .setCmdId(this.getCmdId()) - .setStatus(this.getStatus()) - .setType(this.getType()); - if (this.getMsg() != null) { - builder.setMsg(this.getMsg()); - } - return builder.build(); - } - - /** - * Builder class for CommandStatus. 
- */ - public static class CommandStatusBuilder { - - private SCMCommandProto.Type type; - private Long cmdId; - private StorageContainerDatanodeProtocolProtos.CommandStatus.Status status; - private String msg; - - CommandStatusBuilder() { - } - - public static CommandStatusBuilder newBuilder() { - return new CommandStatusBuilder(); - } - - public Type getType() { - return type; - } - - public Long getCmdId() { - return cmdId; - } - - public Status getStatus() { - return status; - } - - public String getMsg() { - return msg; - } - - public CommandStatusBuilder setType(Type commandType) { - this.type = commandType; - return this; - } - - public CommandStatusBuilder setCmdId(Long commandId) { - this.cmdId = commandId; - return this; - } - - public CommandStatusBuilder setStatus(Status commandStatus) { - this.status = commandStatus; - return this; - } - - public CommandStatusBuilder setMsg(String message) { - this.msg = message; - return this; - } - - public CommandStatus build() { - return new CommandStatus(type, cmdId, status, msg); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java deleted file mode 100644 index e9ccb08a14113..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; - -/** - * Command status to report about block deletion. - */ -public class DeleteBlockCommandStatus extends CommandStatus { - - private ContainerBlocksDeletionACKProto blocksDeletionAck = null; - - public DeleteBlockCommandStatus(Type type, Long cmdId, - StorageContainerDatanodeProtocolProtos.CommandStatus.Status status, - String msg, - ContainerBlocksDeletionACKProto blocksDeletionAck) { - super(type, cmdId, status, msg); - this.blocksDeletionAck = blocksDeletionAck; - } - - public void setBlocksDeletionAck( - ContainerBlocksDeletionACKProto deletionAck) { - blocksDeletionAck = deletionAck; - } - - @Override - public CommandStatus getFromProtoBuf( - StorageContainerDatanodeProtocolProtos.CommandStatus cmdStatusProto) { - return DeleteBlockCommandStatusBuilder.newBuilder() - .setBlockDeletionAck(cmdStatusProto.getBlockDeletionAck()) - .setCmdId(cmdStatusProto.getCmdId()) - .setStatus(cmdStatusProto.getStatus()) - .setType(cmdStatusProto.getType()) - .setMsg(cmdStatusProto.getMsg()) - .build(); - } - - @Override - public StorageContainerDatanodeProtocolProtos.CommandStatus - getProtoBufMessage() { - StorageContainerDatanodeProtocolProtos.CommandStatus.Builder builder = - StorageContainerDatanodeProtocolProtos.CommandStatus.newBuilder() - .setCmdId(this.getCmdId()) - .setStatus(this.getStatus()) - .setType(this.getType()); - if (blocksDeletionAck != null) { - builder.setBlockDeletionAck(blocksDeletionAck); - } - if (this.getMsg() != null) { - builder.setMsg(this.getMsg()); - } - return builder.build(); - } - - /** - * Builder for DeleteBlockCommandStatus. - */ - public static final class DeleteBlockCommandStatusBuilder - extends CommandStatusBuilder { - private ContainerBlocksDeletionACKProto blocksDeletionAck = null; - - public static DeleteBlockCommandStatusBuilder newBuilder() { - return new DeleteBlockCommandStatusBuilder(); - } - - public DeleteBlockCommandStatusBuilder setBlockDeletionAck( - ContainerBlocksDeletionACKProto deletionAck) { - this.blocksDeletionAck = deletionAck; - return this; - } - - @Override - public CommandStatus build() { - return new DeleteBlockCommandStatus(getType(), getCmdId(), getStatus(), - getMsg(), blocksDeletionAck); - } - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java deleted file mode 100644 index 03a876cee3497..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto; - -import java.util.List; - -/** - * A SCM command asks a datanode to delete a number of blocks. - */ -public class DeleteBlocksCommand extends - SCMCommand { - - private List blocksTobeDeleted; - - - public DeleteBlocksCommand(List blocks) { - super(); - this.blocksTobeDeleted = blocks; - } - - // Should be called only for protobuf conversion - private DeleteBlocksCommand(List blocks, - long id) { - super(id); - this.blocksTobeDeleted = blocks; - } - - public List blocksTobeDeleted() { - return this.blocksTobeDeleted; - } - - @Override - public SCMCommandProto.Type getType() { - return SCMCommandProto.Type.deleteBlocksCommand; - } - - public static DeleteBlocksCommand getFromProtobuf( - DeleteBlocksCommandProto deleteBlocksProto) { - return new DeleteBlocksCommand(deleteBlocksProto - .getDeletedBlocksTransactionsList(), deleteBlocksProto.getCmdId()); - } - - @Override - public DeleteBlocksCommandProto getProto() { - return DeleteBlocksCommandProto.newBuilder() - .setCmdId(getId()) - .addAllDeletedBlocksTransactions(blocksTobeDeleted).build(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java deleted file mode 100644 index 48aa83bcc8c03..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.protocol.commands; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeleteContainerCommandProto; - -/** - * SCM command which tells the datanode to delete a container. - */ -public class DeleteContainerCommand extends - SCMCommand { - - private final long containerId; - private final boolean force; - - /** - * DeleteContainerCommand, to send a command for datanode to delete a - * container. - * @param containerId - */ - public DeleteContainerCommand(long containerId) { - this(containerId, false); - } - - /** - * DeleteContainerCommand, to send a command for datanode to delete a - * container. - * @param containerId - * @param forceFlag if this is set to true, we delete container without - * checking state of the container. - */ - - public DeleteContainerCommand(long containerId, boolean forceFlag) { - this.containerId = containerId; - this.force = forceFlag; - } - - @Override - public SCMCommandProto.Type getType() { - return SCMCommandProto.Type.deleteContainerCommand; - } - - @Override - public DeleteContainerCommandProto getProto() { - DeleteContainerCommandProto.Builder builder = - DeleteContainerCommandProto.newBuilder(); - builder.setCmdId(getId()) - .setContainerID(getContainerID()).setForce(force); - return builder.build(); - } - - public long getContainerID() { - return containerId; - } - - public boolean isForce() { - return force; - } - - public static DeleteContainerCommand getFromProtobuf( - DeleteContainerCommandProto protoMessage) { - Preconditions.checkNotNull(protoMessage); - return new DeleteContainerCommand(protoMessage.getContainerID(), - protoMessage.getForce()); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java deleted file mode 100644 index 42778cb6e4959..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import com.google.common.base.Strings; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto - .ErrorCode; - -/** - * Response to Datanode Register call. 
- */ -public class RegisteredCommand { - private String clusterID; - private ErrorCode error; - private DatanodeDetails datanode; - - public RegisteredCommand(final ErrorCode error, final DatanodeDetails node, - final String clusterID) { - this.datanode = node; - this.clusterID = clusterID; - this.error = error; - } - - /** - * Returns a new builder. - * - * @return - Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Returns datanode. - * - * @return - Datanode. - */ - public DatanodeDetails getDatanode() { - return datanode; - } - - /** - * Returns cluster ID. - * - * @return -- ClusterID - */ - public String getClusterID() { - return clusterID; - } - - /** - * Returns ErrorCode. - * - * @return - ErrorCode - */ - public ErrorCode getError() { - return error; - } - - /** - * Gets the protobuf message of this object. - * - * @return A protobuf message. - */ - public SCMRegisteredResponseProto getProtoBufMessage() { - SCMRegisteredResponseProto.Builder builder = - SCMRegisteredResponseProto.newBuilder() - // TODO : Fix this later when we have multiple SCM support. - // .setAddressList(addressList) - .setClusterID(this.clusterID) - .setDatanodeUUID(this.datanode.getUuidString()) - .setErrorCode(this.error); - if (!Strings.isNullOrEmpty(datanode.getHostName())) { - builder.setHostname(datanode.getHostName()); - } - if (!Strings.isNullOrEmpty(datanode.getIpAddress())) { - builder.setIpAddress(datanode.getIpAddress()); - } - if (!Strings.isNullOrEmpty(datanode.getNetworkName())) { - builder.setNetworkName(datanode.getNetworkName()); - } - if (!Strings.isNullOrEmpty(datanode.getNetworkLocation())) { - builder.setNetworkLocation(datanode.getNetworkLocation()); - } - - return builder.build(); - } - - /** - * A builder class to verify all values are sane. - */ - public static class Builder { - private DatanodeDetails datanode; - private String clusterID; - private ErrorCode error; - - /** - * sets datanode details. - * - * @param node - datanode details - * @return Builder - */ - public Builder setDatanode(DatanodeDetails node) { - this.datanode = node; - return this; - } - - /** - * Sets cluster ID. - * - * @param cluster - clusterID - * @return Builder - */ - public Builder setClusterID(String cluster) { - this.clusterID = cluster; - return this; - } - - /** - * Sets Error code. - * - * @param errorCode - error code - * @return Builder - */ - public Builder setErrorCode(ErrorCode errorCode) { - this.error = errorCode; - return this; - } - - /** - * Build the command object. 
- * - * @return RegisteredCommand - */ - public RegisteredCommand build() { - if ((this.error == ErrorCode.success) && (this.datanode == null - || Strings.isNullOrEmpty(this.datanode.getUuidString()) - || Strings.isNullOrEmpty(this.clusterID))) { - throw new IllegalArgumentException("On success, RegisteredCommand " - + "needs datanodeUUID and ClusterID."); - } - return new RegisteredCommand(this.error, this.datanode, this.clusterID); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java deleted file mode 100644 index e663bed794f32..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import java.util.List; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto - .Builder; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; - -import com.google.common.base.Preconditions; - -/** - * SCM command to request replication of a container. 
- */ -public class ReplicateContainerCommand - extends SCMCommand { - - private final long containerID; - private final List sourceDatanodes; - - public ReplicateContainerCommand(long containerID, - List sourceDatanodes) { - super(); - this.containerID = containerID; - this.sourceDatanodes = sourceDatanodes; - } - - // Should be called only for protobuf conversion - public ReplicateContainerCommand(long containerID, - List sourceDatanodes, long id) { - super(id); - this.containerID = containerID; - this.sourceDatanodes = sourceDatanodes; - } - - @Override - public Type getType() { - return SCMCommandProto.Type.replicateContainerCommand; - } - - @Override - public ReplicateContainerCommandProto getProto() { - Builder builder = ReplicateContainerCommandProto.newBuilder() - .setCmdId(getId()) - .setContainerID(containerID); - for (DatanodeDetails dd : sourceDatanodes) { - builder.addSources(dd.getProtoBufMessage()); - } - return builder.build(); - } - - public static ReplicateContainerCommand getFromProtobuf( - ReplicateContainerCommandProto protoMessage) { - Preconditions.checkNotNull(protoMessage); - - List datanodeDetails = - protoMessage.getSourcesList() - .stream() - .map(DatanodeDetails::getFromProtoBuf) - .collect(Collectors.toList()); - - return new ReplicateContainerCommand(protoMessage.getContainerID(), - datanodeDetails, protoMessage.getCmdId()); - - } - - public long getContainerID() { - return containerID; - } - - public List getSourceDatanodes() { - return sourceDatanodes; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java deleted file mode 100644 index e3ea4aeeaff64..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; - -import static org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ReregisterCommandProto; - -/** - * Informs a datanode to register itself with SCM again. - */ -public class ReregisterCommand extends - SCMCommand{ - - /** - * Returns the type of this command. - * - * @return Type - */ - @Override - public SCMCommandProto.Type getType() { - return SCMCommandProto.Type.reregisterCommand; - } - - /** - * Not implemented for ReregisterCommand. - * - * @return cmdId. 
- */ - @Override - public long getId() { - return 0; - } - - @Override - public ReregisterCommandProto getProto() { - return ReregisterCommandProto - .newBuilder() - .build(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java deleted file mode 100644 index 3c4e05b424af2..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload; - -/** - * A class that acts as the base class to convert between Java and SCM - * commands in protobuf format. - * @param - */ -public abstract class SCMCommand implements - IdentifiableEventPayload { - private long id; - - SCMCommand() { - this.id = HddsIdFactory.getLongId(); - } - - SCMCommand(long id) { - this.id = id; - } - /** - * Returns the type of this command. - * @return Type - */ - public abstract SCMCommandProto.Type getType(); - - /** - * Gets the protobuf message of this object. - * @return A protobuf message. - */ - public abstract T getProto(); - - /** - * Gets the commandId of this object. - * @return uuid. - */ - public long getId() { - return id; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java deleted file mode 100644 index 7083c1b154d68..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; -/** - Set of classes that help in protoc conversions. - **/ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java deleted file mode 100644 index a718fa7476fa0..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.protocol; - -/** - * This package contains classes for HDDS protocol definitions. - */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java deleted file mode 100644 index 9b446666e5d11..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ /dev/null @@ -1,177 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; - -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest.Builder; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; - -import java.io.Closeable; -import java.io.IOException; -import java.util.function.Consumer; - -/** - * This class is the client-side translator to translate the requests made on - * the {@link StorageContainerDatanodeProtocol} interface to the RPC server - * implementing {@link StorageContainerDatanodeProtocolPB}. - */ -public class StorageContainerDatanodeProtocolClientSideTranslatorPB - implements StorageContainerDatanodeProtocol, ProtocolTranslator, Closeable { - - /** - * RpcController is not used and hence is set to null. - */ - private static final RpcController NULL_RPC_CONTROLLER = null; - private final StorageContainerDatanodeProtocolPB rpcProxy; - - /** - * Constructs a Client side interface that calls into SCM datanode protocol. - * - * @param rpcProxy - Proxy for RPC. - */ - public StorageContainerDatanodeProtocolClientSideTranslatorPB( - StorageContainerDatanodeProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; - } - - /** - * Closes this stream and releases any system resources associated with it. If - * the stream is already closed then invoking this method has no effect. - *
- *
As noted in {@link AutoCloseable#close()}, cases where the close may - * fail require careful attention. It is strongly advised to relinquish the - * underlying resources and to internally mark the {@code Closeable} - * as closed, prior to throwing the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - RPC.stopProxy(rpcProxy); - } - - /** - * Return the proxy object underlying this protocol translator. - * - * @return the proxy object underlying this protocol translator. - */ - @Override - public Object getUnderlyingProxyObject() { - return rpcProxy; - } - - /** - * Helper method to wrap the request and send the message. - */ - private SCMDatanodeResponse submitRequest(Type type, - Consumer builderConsumer) throws IOException { - final SCMDatanodeResponse response; - try { - Builder builder = SCMDatanodeRequest.newBuilder() - .setCmdType(type); - builderConsumer.accept(builder); - SCMDatanodeRequest wrapper = builder.build(); - - response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper); - } catch (ServiceException ex) { - throw ProtobufHelper.getRemoteException(ex); - } - return response; - } - - /** - * Returns SCM version. - * - * @param unused - set to null and unused. - * @return Version info. - */ - @Override - public SCMVersionResponseProto getVersion(SCMVersionRequestProto - request) throws IOException { - return submitRequest(Type.GetVersion, - (builder) -> builder - .setGetVersionRequest(SCMVersionRequestProto.newBuilder().build())) - .getGetVersionResponse(); - } - - /** - * Send by datanode to SCM. - * - * @param heartbeat node heartbeat - * @throws IOException - */ - - @Override - public SCMHeartbeatResponseProto sendHeartbeat( - SCMHeartbeatRequestProto heartbeat) throws IOException { - return submitRequest(Type.SendHeartbeat, - (builder) -> builder.setSendHeartbeatRequest(heartbeat)) - .getSendHeartbeatResponse(); - } - - /** - * Register Datanode. - * - * @param datanodeDetailsProto - Datanode Details - * @param nodeReport - Node Report. - * @param containerReportsRequestProto - Container Reports. - * @return SCM Command. - */ - @Override - public SCMRegisteredResponseProto register( - DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport, - ContainerReportsProto containerReportsRequestProto, - PipelineReportsProto pipelineReportsProto) - throws IOException { - SCMRegisterRequestProto.Builder req = - SCMRegisterRequestProto.newBuilder(); - req.setDatanodeDetails(datanodeDetailsProto); - req.setContainerReport(containerReportsRequestProto); - req.setPipelineReports(pipelineReportsProto); - req.setNodeReport(nodeReport); - return submitRequest(Type.Register, - (builder) -> builder.setRegisterRequest(req)) - .getRegisterResponse(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java deleted file mode 100644 index 9006e9175acba..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos - .StorageContainerDatanodeProtocolService; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.security.KerberosInfo; - -/** - * Protocol used from a datanode to StorageContainerManager. This extends - * the Protocol Buffers service interface to add Hadoop-specific annotations. - */ - -@ProtocolInfo(protocolName = - "org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol", - protocolVersion = 1) -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY, - clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY) -public interface StorageContainerDatanodeProtocolPB extends - StorageContainerDatanodeProtocolService.BlockingInterface { -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java deleted file mode 100644 index ed704ebf4310c..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import java.io.IOException; - -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Status; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type; -import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is the server-side translator that forwards requests received on - * {@link StorageContainerDatanodeProtocolPB} to the {@link - * StorageContainerDatanodeProtocol} server implementation. 
- */ -public class StorageContainerDatanodeProtocolServerSideTranslatorPB - implements StorageContainerDatanodeProtocolPB { - - private static final Logger LOG = LoggerFactory - .getLogger(StorageContainerDatanodeProtocolServerSideTranslatorPB.class); - - private final StorageContainerDatanodeProtocol impl; - private final OzoneProtocolMessageDispatcher dispatcher; - - public StorageContainerDatanodeProtocolServerSideTranslatorPB( - StorageContainerDatanodeProtocol impl, - ProtocolMessageMetrics protocolMessageMetrics) { - this.impl = impl; - dispatcher = - new OzoneProtocolMessageDispatcher<>("SCMDatanodeProtocol", - protocolMessageMetrics, - LOG); - } - - public SCMRegisteredResponseProto register( - SCMRegisterRequestProto request) throws IOException { - ContainerReportsProto containerRequestProto = request - .getContainerReport(); - NodeReportProto dnNodeReport = request.getNodeReport(); - PipelineReportsProto pipelineReport = request.getPipelineReports(); - return impl.register(request.getDatanodeDetails(), dnNodeReport, - containerRequestProto, pipelineReport); - - } - - @Override - public SCMDatanodeResponse submitRequest(RpcController controller, - SCMDatanodeRequest request) throws ServiceException { - return dispatcher.processRequest(request, this::processMessage, - request.getCmdType(), request.getTraceID()); - } - - public SCMDatanodeResponse processMessage(SCMDatanodeRequest request) - throws ServiceException { - try { - Type cmdType = request.getCmdType(); - switch (cmdType) { - case GetVersion: - return SCMDatanodeResponse.newBuilder() - .setCmdType(cmdType) - .setStatus(Status.OK) - .setGetVersionResponse( - impl.getVersion(request.getGetVersionRequest())) - .build(); - case SendHeartbeat: - return SCMDatanodeResponse.newBuilder() - .setCmdType(cmdType) - .setStatus(Status.OK) - .setSendHeartbeatResponse( - impl.sendHeartbeat(request.getSendHeartbeatRequest())) - .build(); - case Register: - return SCMDatanodeResponse.newBuilder() - .setCmdType(cmdType) - .setStatus(Status.OK) - .setRegisterResponse(register(request.getRegisterRequest())) - .build(); - default: - throw new ServiceException("Unknown command type: " + cmdType); - } - } catch (IOException e) { - throw new ServiceException(e); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java deleted file mode 100644 index 378a8f389cf5a..0000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
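The client- and server-side translators removed above are the two halves of one RPC pipeline: the client wraps every call in an SCMDatanodeRequest envelope, and the server unwraps it in processMessage(). A minimal sketch of how a caller would obtain and use the client half, assuming an SCM address and a Configuration are already available; it uses the plain RPC.getProxy overload rather than the fuller getProtocolProxy call that appears in the removed test utilities further down.

    // Sketch only; imports and exception handling omitted.
    RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
        ProtobufRpcEngine.class);
    StorageContainerDatanodeProtocolPB proxy = RPC.getProxy(
        StorageContainerDatanodeProtocolPB.class,
        RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class),
        scmAddress, conf);                     // scmAddress: assumed InetSocketAddress
    StorageContainerDatanodeProtocolClientSideTranslatorPB client =
        new StorageContainerDatanodeProtocolClientSideTranslatorPB(proxy);
    SCMVersionResponseProto version =
        client.getVersion(SCMVersionRequestProto.newBuilder().build());
    client.close();                            // stops the underlying RPC proxy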
- */ - -package org.apache.hadoop.ozone.protocolPB; diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto deleted file mode 100644 index a975cd5605fc7..0000000000000 --- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto +++ /dev/null @@ -1,429 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdds.protocol.proto"; - -option java_outer_classname = "StorageContainerDatanodeProtocolProtos"; - -option java_generic_services = true; - -option java_generate_equals_and_hash = true; - -package hadoop.hdds; - -import "hdds.proto"; - - -message SCMDatanodeRequest { - required Type cmdType = 1; // Type of the command - - optional string traceID = 2; - - optional SCMVersionRequestProto getVersionRequest = 3; - optional SCMRegisterRequestProto registerRequest = 4; - optional SCMHeartbeatRequestProto sendHeartbeatRequest = 5; -} - -message SCMDatanodeResponse { - required Type cmdType = 1; // Type of the command - - optional string traceID = 2; - - optional bool success = 3 [default = true]; - - optional string message = 4; - - required Status status = 5; - - optional SCMVersionResponseProto getVersionResponse = 6; - optional SCMRegisteredResponseProto registerResponse = 7; - optional SCMHeartbeatResponseProto sendHeartbeatResponse = 8; - -} - -enum Type { - GetVersion = 1; - Register = 2; - SendHeartbeat = 3; -} - -enum Status { - OK = 1; - ERROR = 2; -} - -/** - * Request for version info of the software stack on the server. - */ -message SCMVersionRequestProto {} - -/** -* Generic response that is send to a version request. This allows keys to be -* added on the fly and protocol to remain stable. -*/ -message SCMVersionResponseProto { - required uint32 softwareVersion = 1; - repeated hadoop.hdds.KeyValue keys = 2; -} - -message SCMRegisterRequestProto { - required DatanodeDetailsProto datanodeDetails = 1; - required NodeReportProto nodeReport = 2; - required ContainerReportsProto containerReport = 3; - required PipelineReportsProto pipelineReports = 4; -} - -/** - * Datanode ID returned by the SCM. This is similar to name node - * registeration of a datanode. 
- */ -message SCMRegisteredResponseProto { - enum ErrorCode { - success = 1; - errorNodeNotPermitted = 2; - } - required ErrorCode errorCode = 1; - required string datanodeUUID = 2; - required string clusterID = 3; - optional SCMNodeAddressList addressList = 4; - optional string hostname = 5; - optional string ipAddress = 6; - optional string networkName = 7; - optional string networkLocation = 8; -} - -/** -* This message is send by data node to indicate that it is alive or it is -* registering with the node manager. -*/ -message SCMHeartbeatRequestProto { - required DatanodeDetailsProto datanodeDetails = 1; - optional NodeReportProto nodeReport = 2; - optional ContainerReportsProto containerReport = 3; - repeated IncrementalContainerReportProto incrementalContainerReport = 4; - repeated CommandStatusReportsProto commandStatusReports = 5; - optional ContainerActionsProto containerActions = 6; - optional PipelineActionsProto pipelineActions = 7; - optional PipelineReportsProto pipelineReports = 8; -} - -/* - * A group of commands for the datanode to execute - */ -message SCMHeartbeatResponseProto { - required string datanodeUUID = 1; - repeated SCMCommandProto commands = 2; -} - -message SCMNodeAddressList { - repeated string addressList = 1; -} - -/** -* This message is send along with the heart beat to report datanode -* storage utilization to SCM. -*/ -message NodeReportProto { - repeated StorageReportProto storageReport = 1; -} - -message StorageReportProto { - required string storageUuid = 1; - required string storageLocation = 2; - optional uint64 capacity = 3 [default = 0]; - optional uint64 scmUsed = 4 [default = 0]; - optional uint64 remaining = 5 [default = 0]; - optional StorageTypeProto storageType = 6 [default = DISK]; - optional bool failed = 7 [default = false]; -} - -/** - * Types of recognized storage media. 
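The heartbeat and report messages defined above are plain protobuf types, so the datanode assembles them with generated builders. A small sketch of building a one-disk node report and attaching it to a heartbeat; the UUID, path, and sizes are placeholder values, and datanodeDetails is assumed to be an existing DatanodeDetailsProto.

    // Sketch only; values are placeholders, imports omitted.
    StorageReportProto storage = StorageReportProto.newBuilder()
        .setStorageUuid(UUID.randomUUID().toString())
        .setStorageLocation("/data/disk1")
        .setCapacity(1_000_000_000L)
        .setScmUsed(0L)
        .setRemaining(1_000_000_000L)
        .build();
    NodeReportProto nodeReport = NodeReportProto.newBuilder()
        .addStorageReport(storage)
        .build();
    SCMHeartbeatRequestProto heartbeat = SCMHeartbeatRequestProto.newBuilder()
        .setDatanodeDetails(datanodeDetails)   // assumed to be in scope
        .setNodeReport(nodeReport)
        .build();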
- */ -enum StorageTypeProto { - DISK = 1; - SSD = 2; - ARCHIVE = 3; - RAM_DISK = 4; - PROVIDED = 5; -} - -message ContainerReportsProto { - repeated ContainerReplicaProto reports = 1; -} - -message IncrementalContainerReportProto { - repeated ContainerReplicaProto report = 1; -} - -message ContainerReplicaProto { - enum State { - OPEN = 1; - CLOSING = 2; - QUASI_CLOSED = 3; - CLOSED = 4; - UNHEALTHY = 5; - INVALID = 6; - } - required int64 containerID = 1; - required State state = 2; - optional int64 size = 3; - optional int64 used = 4; - optional int64 keyCount = 5; - optional int64 readCount = 6; - optional int64 writeCount = 7; - optional int64 readBytes = 8; - optional int64 writeBytes = 9; - optional string finalhash = 10; - optional int64 deleteTransactionId = 11; - optional uint64 blockCommitSequenceId = 12; - optional string originNodeId = 13; -} - -message CommandStatusReportsProto { - repeated CommandStatus cmdStatus = 1; -} - -message CommandStatus { - enum Status { - PENDING = 1; - EXECUTED = 2; - FAILED = 3; - } - required int64 cmdId = 1; - required Status status = 2 [default = PENDING]; - required SCMCommandProto.Type type = 3; - optional string msg = 4; - optional ContainerBlocksDeletionACKProto blockDeletionAck = 5; -} - -message ContainerActionsProto { - repeated ContainerAction containerActions = 1; -} - -message ContainerAction { - enum Action { - CLOSE = 1; - } - - enum Reason { - CONTAINER_FULL = 1; - CONTAINER_UNHEALTHY = 2; - } - - required int64 containerID = 1; - required Action action = 2; - optional Reason reason = 3; -} - -message PipelineReport { - required PipelineID pipelineID = 1; -} - -message PipelineReportsProto { - repeated PipelineReport pipelineReport = 1; -} - -message PipelineActionsProto { - repeated PipelineAction pipelineActions = 1; -} - -message ClosePipelineInfo { - enum Reason { - PIPELINE_FAILED = 1; - PIPELINE_LOG_FAILED = 2; - STATEMACHINE_TRANSACTION_FAILED = 3; - } - required PipelineID pipelineID = 1; - optional Reason reason = 3; - optional string detailedReason = 4; -} - -message PipelineAction { - enum Action { - CLOSE = 1; - } - - /** - * Action will be used to identify the correct pipeline action. - */ - required Action action = 1; - optional ClosePipelineInfo closePipeline = 2; -} - -/* - * These are commands returned by SCM for to the datanode to execute. - */ -message SCMCommandProto { - enum Type { - reregisterCommand = 1; - deleteBlocksCommand = 2; - closeContainerCommand = 3; - deleteContainerCommand = 4; - replicateContainerCommand = 5; - } - // TODO: once we start using protoc 3.x, refactor this message using "oneof" - required Type commandType = 1; - optional ReregisterCommandProto reregisterCommandProto = 2; - optional DeleteBlocksCommandProto deleteBlocksCommandProto = 3; - optional CloseContainerCommandProto closeContainerCommandProto = 4; - optional DeleteContainerCommandProto deleteContainerCommandProto = 5; - optional ReplicateContainerCommandProto replicateContainerCommandProto = 6; -} - -/** - * SCM informs a datanode to register itself again. - * With recieving this command, datanode will transit to REGISTER state. - */ -message ReregisterCommandProto {} - - -// HB response from SCM, contains a list of block deletion transactions. -message DeleteBlocksCommandProto { - repeated DeletedBlocksTransaction deletedBlocksTransactions = 1; - required int64 cmdId = 3; -} - -// The deleted blocks which are stored in deletedBlock.db of scm. 
-// We don't use BlockID because this only contians multiple localIDs -// of the same containerID. -message DeletedBlocksTransaction { - required int64 txID = 1; - required int64 containerID = 2; - repeated int64 localID = 3; - // the retry time of sending deleting command to datanode. - required int32 count = 4; -} - -// ACK message datanode sent to SCM, contains the result of -// block deletion transactions. -message ContainerBlocksDeletionACKProto { - message DeleteBlockTransactionResult { - required int64 txID = 1; - required int64 containerID = 2; - required bool success = 3; - } - repeated DeleteBlockTransactionResult results = 1; - required string dnId = 2; -} - -/** -This command asks the datanode to close a specific container. -*/ -message CloseContainerCommandProto { - required int64 containerID = 1; - required PipelineID pipelineID = 2; - // cmdId will be removed - required int64 cmdId = 3; - // Force will be used when closing a container out side of ratis. - optional bool force = 4 [default = false]; -} - -/** -This command asks the datanode to delete a specific container. -*/ -message DeleteContainerCommandProto { - required int64 containerID = 1; - required int64 cmdId = 2; - required bool force = 3 [default = false]; -} - -/** -This command asks the datanode to replicate a container from specific sources. -*/ -message ReplicateContainerCommandProto { - required int64 containerID = 1; - repeated DatanodeDetailsProto sources = 2; - required int64 cmdId = 3; -} - -/** - * Protocol used from a datanode to StorageContainerManager. - * - * Please see the request and response messages for details of the RPC calls. - * - * Here is a simple state diagram that shows how a datanode would boot up and - * communicate with SCM. - * - * ----------------------- - * | Start | - * ---------- ------------ - * | - * | - * | - * | - * | - * | - * | - * ----------v------------- - * | Searching for SCM ------------ - * ---------- ------------- | - * | | - * | | - * | ----------v------------- - * | | Register if needed | - * | ----------- ------------ - * | | - * v | - * ----------- ---------------- | - * --------- Heartbeat state <-------- - * | --------^------------------- - * | | - * | | - * | | - * | | - * | | - * | | - * | | - * ------------------ - * - * - * - * Here is how this protocol is used by the datanode. When a datanode boots up - * it moves into a stated called SEARCHING_SCM. In this state datanode is - * trying to establish communication with the SCM. The address of the SCMs are - * retrieved from the configuration information. - * - * In the SEARCHING_SCM state, only rpc call made by datanode is a getVersion - * call to SCM. Once any of the SCMs reply, datanode checks if it has a local - * persisted datanode ID. If it has this means that this datanode is already - * registered with some SCM. If this file is not found, datanode assumes that - * it needs to do a registration. - * - * If registration is need datanode moves into REGISTER state. It will - * send a register call with DatanodeDetailsProto data structure and presist - * that info. - * - * The response to the command contains clusterID. This information is - * also persisted by the datanode and moves into heartbeat state. - * - * Once in the heartbeat state, datanode sends heartbeats and container reports - * to SCM and process commands issued by SCM until it is shutdown. - * - */ -service StorageContainerDatanodeProtocolService { - - //Message sent from Datanode to SCM as a heartbeat. 
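The long comment above walks through the datanode's life cycle against this service: find an SCM via getVersion, register once if no datanode ID has been persisted, then loop in the heartbeat state executing whatever commands the SCM returns. A compressed sketch of that sequence, expressed against the client translator removed earlier in this patch; the helper methods are invented names standing in for the real state-machine logic, and retries and error handling are omitted.

    // Sketch only; hasPersistedDatanodeId, persistClusterId and dispatch are
    // invented placeholders, not methods from the removed code.
    client.getVersion(SCMVersionRequestProto.newBuilder().build());   // SEARCHING_SCM
    if (!hasPersistedDatanodeId()) {                                   // REGISTER
      SCMRegisteredResponseProto registered = client.register(
          datanodeDetails, nodeReport, containerReport, pipelineReports);
      persistClusterId(registered.getClusterID());
    }
    while (running) {                                                  // HEARTBEAT
      SCMHeartbeatResponseProto response = client.sendHeartbeat(heartbeatRequest);
      for (SCMCommandProto command : response.getCommandsList()) {
        dispatch(command);
      }
    }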
- rpc submitRequest (SCMDatanodeRequest) returns (SCMDatanodeResponse); -} diff --git a/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider b/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider deleted file mode 100644 index 2e103fea7b7c3..0000000000000 --- a/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainerProvider diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep deleted file mode 100644 index ff1232e5fcaa0..0000000000000 --- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep +++ /dev/null @@ -1,17 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java deleted file mode 100644 index af56d0643d5b4..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; - -import java.io.File; -import java.io.IOException; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.ServicePlugin; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -/** - * Test class for {@link HddsDatanodeService}. 
- */ -public class TestHddsDatanodeService { - private File testDir; - private OzoneConfiguration conf; - private HddsDatanodeService service; - private String[] args = new String[] {}; - - @Before - public void setUp() { - testDir = GenericTestUtils.getRandomizedTestDir(); - conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_ENABLED, true); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); - conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, MockService.class, - ServicePlugin.class); - - String volumeDir = testDir + "/disk1"; - conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, volumeDir); - } - - @After - public void tearDown() { - FileUtil.fullyDelete(testDir); - } - - @Test - public void testStartup() throws IOException { - service = HddsDatanodeService.createHddsDatanodeService(args); - service.start(conf); - - assertNotNull(service.getDatanodeDetails()); - assertNotNull(service.getDatanodeDetails().getHostName()); - assertFalse(service.getDatanodeStateMachine().isDaemonStopped()); - - service.stop(); - service.join(); - service.close(); - } - - static class MockService implements ServicePlugin { - - @Override - public void close() throws IOException { - // Do nothing - } - - @Override - public void start(Object arg0) { - // Do nothing - } - - @Override - public void stop() { - // Do nothing - } - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java deleted file mode 100644 index 04fd3a499aa0a..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java +++ /dev/null @@ -1,274 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.ozone; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.util.ServicePlugin; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.cert.X509Certificate; -import java.util.concurrent.Callable; - -import static org.apache.hadoop.ozone.HddsDatanodeService.getLogger; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; - -/** - * Test class for {@link HddsDatanodeService}. - */ -public class TestHddsSecureDatanodeInit { - - private static File testDir; - private static OzoneConfiguration conf; - private static HddsDatanodeService service; - private static String[] args = new String[]{}; - private static PrivateKey privateKey; - private static PublicKey publicKey; - private static GenericTestUtils.LogCapturer dnLogs; - private static CertificateClient client; - private static SecurityConfig securityConfig; - private static KeyCodec keyCodec; - private static CertificateCodec certCodec; - private static X509CertificateHolder certHolder; - private final static String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME; - - @BeforeClass - public static void setUp() throws Exception { - testDir = GenericTestUtils.getRandomizedTestDir(); - conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); - //conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost"); - String volumeDir = testDir + "/disk1"; - conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, volumeDir); - - conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); - conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, - TestHddsDatanodeService.MockService.class, - ServicePlugin.class); - securityConfig = new SecurityConfig(conf); - - service = HddsDatanodeService.createHddsDatanodeService(args); - dnLogs = GenericTestUtils.LogCapturer.captureLogs(getLogger()); - callQuietly(() -> { - service.start(conf); - return null; - }); - callQuietly(() -> { - service.initializeCertificateClient(conf); - return null; - }); - certCodec = new CertificateCodec(securityConfig, DN_COMPONENT); - keyCodec = new KeyCodec(securityConfig, DN_COMPONENT); - dnLogs.clearOutput(); - privateKey = service.getCertificateClient().getPrivateKey(); - publicKey = service.getCertificateClient().getPublicKey(); - X509Certificate x509Certificate = null; - - x509Certificate = KeyStoreTestUtil.generateCertificate( - "CN=Test", new KeyPair(publicKey, 
privateKey), 10, - securityConfig.getSignatureAlgo()); - certHolder = new X509CertificateHolder(x509Certificate.getEncoded()); - - } - - @AfterClass - public static void tearDown() { - FileUtil.fullyDelete(testDir); - } - - @Before - public void setUpDNCertClient(){ - - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(DN_COMPONENT).toString(), - securityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(DN_COMPONENT).toString(), - securityConfig.getPublicKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get(securityConfig - .getCertificateLocation(DN_COMPONENT).toString(), - securityConfig.getCertificateFileName()).toFile()); - dnLogs.clearOutput(); - client = new DNCertificateClient(securityConfig, - certHolder.getSerialNumber().toString()); - service.setCertificateClient(client); - } - - @Test - public void testSecureDnStartupCase0() throws Exception { - - // Case 0: When keypair as well as certificate is missing. Initial keypair - // boot-up. Get certificate will fail as no SCM is not running. - LambdaTestUtils.intercept(Exception.class, "", - () -> service.initializeCertificateClient(conf)); - - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: GETCERT")); - } - - @Test - public void testSecureDnStartupCase1() throws Exception { - // Case 1: When only certificate is present. - - certCodec.writeCertificate(certHolder); - LambdaTestUtils.intercept(RuntimeException.class, "DN security" + - " initialization failed", - () -> service.initializeCertificateClient(conf)); - Assert.assertNull(client.getPrivateKey()); - Assert.assertNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE")); - } - - @Test - public void testSecureDnStartupCase2() throws Exception { - // Case 2: When private key and certificate is missing. - keyCodec.writePublicKey(publicKey); - LambdaTestUtils.intercept(RuntimeException.class, "DN security" + - " initialization failed", - () -> service.initializeCertificateClient(conf)); - Assert.assertNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE")); - } - - @Test - public void testSecureDnStartupCase3() throws Exception { - // Case 3: When only public key and certificate is present. - keyCodec.writePublicKey(publicKey); - certCodec.writeCertificate(certHolder); - LambdaTestUtils.intercept(RuntimeException.class, "DN security" + - " initialization failed", - () -> service.initializeCertificateClient(conf)); - Assert.assertNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE")); - } - - @Test - public void testSecureDnStartupCase4() throws Exception { - // Case 4: When public key as well as certificate is missing. 
- keyCodec.writePrivateKey(privateKey); - LambdaTestUtils.intercept(RuntimeException.class, " DN security" + - " initialization failed", - () -> service.initializeCertificateClient(conf)); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE")); - dnLogs.clearOutput(); - } - - @Test - public void testSecureDnStartupCase5() throws Exception { - // Case 5: If private key and certificate is present. - certCodec.writeCertificate(certHolder); - keyCodec.writePrivateKey(privateKey); - service.initializeCertificateClient(conf); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: SUCCESS")); - } - - @Test - public void testSecureDnStartupCase6() throws Exception { - // Case 6: If key pair already exist than response should be GETCERT. - keyCodec.writePublicKey(publicKey); - keyCodec.writePrivateKey(privateKey); - LambdaTestUtils.intercept(Exception.class, "", - () -> service.initializeCertificateClient(conf)); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: GETCERT")); - } - - @Test - public void testSecureDnStartupCase7() throws Exception { - // Case 7 When keypair and certificate is present. - keyCodec.writePublicKey(publicKey); - keyCodec.writePrivateKey(privateKey); - certCodec.writeCertificate(certHolder); - - service.initializeCertificateClient(conf); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: SUCCESS")); - } - - /** - * Invoke a callable; Ignore all exception. - * @param closure closure to execute - * @return - */ - public static void callQuietly(Callable closure) { - try { - closure.call(); - } catch (Throwable e) { - // Ignore all Throwable, - } - } - - @Test - public void testGetCSR() throws Exception { - keyCodec.writePublicKey(publicKey); - keyCodec.writePrivateKey(privateKey); - service.setCertificateClient(client); - PKCS10CertificationRequest csr = - service.getCSR(conf); - Assert.assertNotNull(csr); - - csr = service.getCSR(conf); - Assert.assertNotNull(csr); - - csr = service.getCSR(conf); - Assert.assertNotNull(csr); - - csr = service.getCSR(conf); - Assert.assertNotNull(csr); - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java deleted file mode 100644 index 923440e238265..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; -import org.apache.hadoop.security.UserGroupInformation; - -import java.net.InetSocketAddress; - -/** - * Helper utility to test containers. - */ -public final class ContainerTestUtils { - - private ContainerTestUtils() { - } - - /** - * Creates an Endpoint class for testing purpose. - * - * @param conf - Conf - * @param address - InetAddres - * @param rpcTimeout - rpcTimeOut - * @return EndPoint - * @throws Exception - */ - public static EndpointStateMachine createEndpoint(Configuration conf, - InetSocketAddress address, int rpcTimeout) throws Exception { - RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class, - ProtobufRpcEngine.class); - long version = - RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class); - - StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy( - StorageContainerDatanodeProtocolPB.class, version, - address, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), rpcTimeout, - RetryPolicies.TRY_ONCE_THEN_FAIL).getProxy(); - - StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient = - new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy); - return new EndpointStateMachine(address, rpcClient, conf); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java deleted file mode 100644 index 5a7c30ca68f79..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.ServerSocket; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageContainerDatanodeProtocolService; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB; -import org.apache.hadoop.test.GenericTestUtils; - -import com.google.protobuf.BlockingService; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import org.mockito.Mockito; - -/** - * Test Endpoint class. - */ -public final class SCMTestUtils { - /** - * Never constructed. - */ - private SCMTestUtils() { - } - - /** - * Starts an RPC server, if configured. - * - * @param conf configuration - * @param addr configured address of RPC server - * @param protocol RPC protocol provided by RPC server - * @param instance RPC protocol implementation instance - * @param handlerCount RPC server handler count - * @return RPC server - * @throws IOException if there is an I/O error while creating RPC server - */ - private static RPC.Server startRpcServer(Configuration conf, - InetSocketAddress addr, Class - protocol, BlockingService instance, int handlerCount) - throws IOException { - RPC.Server rpcServer = new RPC.Builder(conf) - .setProtocol(protocol) - .setInstance(instance) - .setBindAddress(addr.getHostString()) - .setPort(addr.getPort()) - .setNumHandlers(handlerCount) - .setVerbose(false) - .setSecretManager(null) - .build(); - - DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer); - return rpcServer; - } - - - /** - * Start Datanode RPC server. - */ - public static RPC.Server startScmRpcServer(Configuration configuration, - StorageContainerDatanodeProtocol server, - InetSocketAddress rpcServerAddresss, int handlerCount) throws - IOException { - RPC.setProtocolEngine(configuration, - StorageContainerDatanodeProtocolPB.class, - ProtobufRpcEngine.class); - - BlockingService scmDatanodeService = - StorageContainerDatanodeProtocolService. 
- newReflectiveBlockingService( - new StorageContainerDatanodeProtocolServerSideTranslatorPB( - server, Mockito.mock(ProtocolMessageMetrics.class))); - - RPC.Server scmServer = startRpcServer(configuration, rpcServerAddresss, - StorageContainerDatanodeProtocolPB.class, scmDatanodeService, - handlerCount); - - scmServer.start(); - return scmServer; - } - - public static InetSocketAddress getReuseableAddress() throws IOException { - try (ServerSocket socket = new ServerSocket(0)) { - socket.setReuseAddress(true); - int port = socket.getLocalPort(); - String addr = InetAddress.getLoopbackAddress().getHostAddress(); - return new InetSocketAddress(addr, port); - } - } - - public static OzoneConfiguration getConf() { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, GenericTestUtils - .getRandomizedTempPath()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils - .getRandomizedTempPath()); - return conf; - } - - public static OzoneConfiguration getOzoneConf() { - return new OzoneConfiguration(); - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java deleted file mode 100644 index c4b29ba2722d1..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java +++ /dev/null @@ -1,355 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.scm.VersionInfo; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; -import org.apache.hadoop.ozone.protocol.VersionResponse; - -import java.io.IOException; -import java.util.*; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * SCM RPC mock class. - */ -public class ScmTestMock implements StorageContainerDatanodeProtocol { - private int rpcResponseDelay; - private AtomicInteger heartbeatCount = new AtomicInteger(0); - private AtomicInteger rpcCount = new AtomicInteger(0); - private AtomicInteger containerReportsCount = new AtomicInteger(0); - private String clusterId; - private String scmId; - - public ScmTestMock() { - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - } - - // Map of datanode to containers - private Map> nodeContainers = - new HashMap<>(); - private Map nodeReports = new HashMap<>(); - private AtomicInteger commandStatusReport = new AtomicInteger(0); - private List cmdStatusList = new ArrayList<>(); - private List scmCommandRequests = new ArrayList<>(); - /** - * Returns the number of heartbeats made to this class. - * - * @return int - */ - public int getHeartbeatCount() { - return heartbeatCount.get(); - } - - /** - * Returns the number of RPC calls made to this mock class instance. - * - * @return - Number of RPC calls serviced by this class. - */ - public int getRpcCount() { - return rpcCount.get(); - } - - /** - * Gets the RPC response delay. - * - * @return delay in milliseconds. - */ - public int getRpcResponseDelay() { - return rpcResponseDelay; - } - - /** - * Sets the RPC response delay. - * - * @param rpcResponseDelay - delay in milliseconds. 
- */ - public void setRpcResponseDelay(int rpcResponseDelay) { - this.rpcResponseDelay = rpcResponseDelay; - } - - /** - * Returns the number of container reports server has seen. - * @return int - */ - public int getContainerReportsCount() { - return containerReportsCount.get(); - } - - /** - * Returns the number of containers that have been reported so far. - * @return - count of reported containers. - */ - public long getContainerCount() { - return nodeContainers.values().parallelStream().mapToLong((containerMap)->{ - return containerMap.size(); - }).sum(); - } - - /** - * Get the number keys reported from container reports. - * @return - number of keys reported. - */ - public long getKeyCount() { - return nodeContainers.values().parallelStream().mapToLong((containerMap)->{ - return containerMap.values().parallelStream().mapToLong((container) -> { - return container.getKeyCount(); - }).sum(); - }).sum(); - } - - /** - * Get the number of bytes used from container reports. - * @return - number of bytes used. - */ - public long getBytesUsed() { - return nodeContainers.values().parallelStream().mapToLong((containerMap)->{ - return containerMap.values().parallelStream().mapToLong((container) -> { - return container.getUsed(); - }).sum(); - }).sum(); - } - - /** - * Returns SCM version. - * - * @return Version info. - */ - @Override - public StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto - getVersion(StorageContainerDatanodeProtocolProtos - .SCMVersionRequestProto unused) throws IOException { - rpcCount.incrementAndGet(); - sleepIfNeeded(); - VersionInfo versionInfo = VersionInfo.getLatestVersion(); - return VersionResponse.newBuilder() - .setVersion(versionInfo.getVersion()) - .addValue(VersionInfo.DESCRIPTION_KEY, versionInfo.getDescription()) - .addValue(OzoneConsts.SCM_ID, scmId) - .addValue(OzoneConsts.CLUSTER_ID, clusterId) - .build().getProtobufMessage(); - - } - - private void sleepIfNeeded() { - if (getRpcResponseDelay() > 0) { - try { - Thread.sleep(getRpcResponseDelay()); - } catch (InterruptedException ex) { - // Just ignore this exception. - } - } - } - - /** - * Used by data node to send a Heartbeat. - * - * @param heartbeat - node heartbeat. - * @return - SCMHeartbeatResponseProto - * @throws IOException - */ - @Override - public StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto - sendHeartbeat(SCMHeartbeatRequestProto heartbeat) throws IOException { - rpcCount.incrementAndGet(); - heartbeatCount.incrementAndGet(); - if (heartbeat.getCommandStatusReportsCount() != 0) { - for (CommandStatusReportsProto statusReport : heartbeat - .getCommandStatusReportsList()) { - cmdStatusList.addAll(statusReport.getCmdStatusList()); - commandStatusReport.incrementAndGet(); - } - } - sleepIfNeeded(); - return SCMHeartbeatResponseProto.newBuilder().addAllCommands( - scmCommandRequests) - .setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid()) - .build(); - } - - /** - * Register Datanode. - * - * @param datanodeDetailsProto DatanodDetailsProto. - * @return SCM Command. 
- */ - @Override - public StorageContainerDatanodeProtocolProtos - .SCMRegisteredResponseProto register( - DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport, - ContainerReportsProto containerReportsRequestProto, - PipelineReportsProto pipelineReportsProto) - throws IOException { - rpcCount.incrementAndGet(); - updateNodeReport(datanodeDetailsProto, nodeReport); - updateContainerReport(containerReportsRequestProto, datanodeDetailsProto); - sleepIfNeeded(); - return StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto - .newBuilder().setClusterID(UUID.randomUUID().toString()) - .setDatanodeUUID(datanodeDetailsProto.getUuid()).setErrorCode( - StorageContainerDatanodeProtocolProtos - .SCMRegisteredResponseProto.ErrorCode.success).build(); - } - - /** - * Update nodeReport. - * @param datanodeDetailsProto - * @param nodeReport - */ - public void updateNodeReport(DatanodeDetailsProto datanodeDetailsProto, - NodeReportProto nodeReport) { - DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( - datanodeDetailsProto); - NodeReportProto.Builder nodeReportProto = NodeReportProto.newBuilder(); - - List storageReports = - nodeReport.getStorageReportList(); - - for(StorageReportProto report : storageReports) { - nodeReportProto.addStorageReport(report); - } - - nodeReports.put(datanode, nodeReportProto.build()); - - } - - /** - * Update the cotainerReport. - * - * @param reports Container report - * @param datanodeDetails DataNode Info - * @throws IOException - */ - public void updateContainerReport( - StorageContainerDatanodeProtocolProtos.ContainerReportsProto reports, - DatanodeDetailsProto datanodeDetails) throws IOException { - Preconditions.checkNotNull(reports); - containerReportsCount.incrementAndGet(); - DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( - datanodeDetails); - if (reports.getReportsCount() > 0) { - Map containers = nodeContainers.get(datanode); - if (containers == null) { - containers = new LinkedHashMap(); - nodeContainers.put(datanode, containers); - } - - for (ContainerReplicaProto report : reports - .getReportsList()) { - containers.put(report.getContainerID(), report); - } - } - } - - - /** - * Return the number of StorageReports of a datanode. - * @param datanodeDetails - * @return count of containers of a datanode - */ - public int getNodeReportsCount(DatanodeDetails datanodeDetails) { - return nodeReports.get(datanodeDetails).getStorageReportCount(); - } - - /** - * Returns the number of containers of a datanode. - * @param datanodeDetails - * @return count of storage reports of a datanode - */ - public int getContainerCountsForDatanode(DatanodeDetails datanodeDetails) { - Map cr = - nodeContainers.get(datanodeDetails); - if(cr != null) { - return cr.size(); - } - return 0; - } - - /** - * Reset the mock Scm for test to get a fresh start without rebuild MockScm. - */ - public void reset() { - heartbeatCount.set(0); - rpcCount.set(0); - containerReportsCount.set(0); - nodeContainers.clear(); - - } - - public int getCommandStatusReportCount() { - return commandStatusReport.get(); - } - - public List getCmdStatusList() { - return cmdStatusList; - } - - public List getScmCommandRequests() { - return scmCommandRequests; - } - - public void clearScmCommandRequests() { - scmCommandRequests.clear(); - } - - public void addScmCommandRequest(SCMCommandProto scmCmd) { - scmCommandRequests.add(scmCmd); - } - - /** - * Set scmId. - * @param id - */ - public void setScmId(String id) { - this.scmId = id; - } - - /** - * Set scmId. 
- * @return scmId - */ - public String getScmId() { - return scmId; - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java deleted file mode 100644 index a4e0028e10819..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common; - -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; -import org.junit.Assert; -import org.junit.Test; - -/** - * This class tests ChunkLayOutVersion. - */ -public class TestChunkLayOutVersion { - - @Test - public void testChunkLayOutVersion() { - - // Check Latest Version and description - Assert.assertEquals(1, ChunkLayOutVersion.getLatestVersion().getVersion()); - Assert.assertEquals("Data without checksums.", ChunkLayOutVersion - .getLatestVersion().getDescription()); - - Assert.assertEquals(1, ChunkLayOutVersion.getAllVersions().length); - - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java deleted file mode 100644 index b6584d17017b7..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
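Taken together, the removed test utilities (SCMTestUtils, ScmTestMock, ContainerTestUtils) form a small harness: a mock SCM served over a real RPC socket plus a datanode-side endpoint pointed at it. A sketch of how a test would wire them up, assuming it runs inside a JUnit method that declares throws Exception; the handler count and RPC timeout values are arbitrary.

    // Sketch only; imports and endpoint cleanup omitted.
    OzoneConfiguration conf = SCMTestUtils.getConf();
    ScmTestMock mockScm = new ScmTestMock();
    InetSocketAddress address = SCMTestUtils.getReuseableAddress();
    RPC.Server scmServer =
        SCMTestUtils.startScmRpcServer(conf, mockScm, address, 10);
    EndpointStateMachine endpoint =
        ContainerTestUtils.createEndpoint(conf, address, 1000);
    // ... exercise version/register/heartbeat calls and assert on
    // mockScm.getHeartbeatCount(), getContainerReportsCount(), etc. ...
    scmServer.stop();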
- */ - -package org.apache.hadoop.ozone.container.common; - -import org.apache.hadoop.fs.FileSystemTestHelper; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.File; - - -/** - * Test ContainerCache with evictions. - */ -public class TestContainerCache { - private static String testRoot = new FileSystemTestHelper().getTestRootDir(); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private void createContainerDB(OzoneConfiguration conf, File dbFile) - throws Exception { - MetadataStore store = MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(dbFile).build(); - - // we close since the SCM pre-creates containers. - // we will open and put Db handle into a cache when keys are being created - // in a container. - - store.close(); - } - - @Test - public void testContainerCacheEviction() throws Exception { - File root = new File(testRoot); - root.mkdirs(); - - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); - - ContainerCache cache = ContainerCache.getInstance(conf); - File containerDir1 = new File(root, "cont1"); - File containerDir2 = new File(root, "cont2"); - File containerDir3 = new File(root, "cont3"); - File containerDir4 = new File(root, "cont4"); - - - createContainerDB(conf, containerDir1); - createContainerDB(conf, containerDir2); - createContainerDB(conf, containerDir3); - createContainerDB(conf, containerDir4); - - // Get 2 references out of the same db and verify the objects are same. - ReferenceCountedDB db1 = cache.getDB(1, "RocksDB", - containerDir1.getPath(), conf); - Assert.assertEquals(1, db1.getReferenceCount()); - ReferenceCountedDB db2 = cache.getDB(1, "RocksDB", - containerDir1.getPath(), conf); - Assert.assertEquals(2, db2.getReferenceCount()); - Assert.assertEquals(2, db1.getReferenceCount()); - Assert.assertEquals(db1, db2); - - // add one more references to ContainerCache. - ReferenceCountedDB db3 = cache.getDB(2, "RocksDB", - containerDir2.getPath(), conf); - Assert.assertEquals(1, db3.getReferenceCount()); - - // and close the reference - db3.close(); - Assert.assertEquals(0, db3.getReferenceCount()); - - Assert.assertTrue(cache.isFull()); - - // add one more reference to ContainerCache and verify that it will not - // evict the least recent entry as it has reference. - ReferenceCountedDB db4 = cache.getDB(3, "RocksDB", - containerDir3.getPath(), conf); - Assert.assertEquals(1, db4.getReferenceCount()); - - Assert.assertEquals(2, cache.size()); - Assert.assertNotNull(cache.get(containerDir1.getPath())); - Assert.assertNull(cache.get(containerDir2.getPath())); - - // Now close both the references for container1 - db1.close(); - db2.close(); - Assert.assertEquals(0, db1.getReferenceCount()); - Assert.assertEquals(0, db2.getReferenceCount()); - - - // The reference count for container1 is 0 but it is not evicted. 
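The eviction test above leans on one invariant of ContainerCache: every getDB() pins the handle until a matching close() drops the reference count back to zero, and only unreferenced entries can be evicted. A minimal sketch of the intended calling discipline, assuming a container ID, DB path, and OzoneConfiguration are already available.

    // Sketch only; containerId and dbPath are assumed values.
    ReferenceCountedDB db = cache.getDB(containerId, "RocksDB", dbPath, conf);
    try {
      // ... read or write container metadata through the wrapped store ...
    } finally {
      db.close();   // decrements the reference count; never close more than once
    }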
- ReferenceCountedDB db5 = cache.getDB(1, "RocksDB", - containerDir1.getPath(), conf); - Assert.assertEquals(1, db5.getReferenceCount()); - Assert.assertEquals(db1, db5); - db5.close(); - db4.close(); - - - // Decrementing reference count below zero should fail. - thrown.expect(IllegalArgumentException.class); - db5.close(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java deleted file mode 100644 index 5cabef295f340..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common; - -import org.junit.Assert; -import org.junit.Test; - -/** - * This class tests DatanodeLayOutVersion. - */ -public class TestDatanodeLayOutVersion { - - @Test - public void testDatanodeLayOutVersion() { - // Check Latest Version and description - Assert.assertEquals(1, DataNodeLayoutVersion.getLatestVersion() - .getVersion()); - Assert.assertEquals("HDDS Datanode LayOut Version 1", DataNodeLayoutVersion - .getLatestVersion().getDescription()); - Assert.assertEquals(DataNodeLayoutVersion.getAllVersions().length, - DataNodeLayoutVersion.getAllVersions().length); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java deleted file mode 100644 index 0f3e7d12d220f..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ /dev/null @@ -1,444 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common; - -import com.google.common.collect.Maps; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .InitDatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .RunningDatanodeState; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; -import static org.junit.Assert.assertTrue; - -/** - * Tests the datanode state machine class and its states. 
- */ -public class TestDatanodeStateMachine { - private static final Logger LOG = - LoggerFactory.getLogger(TestDatanodeStateMachine.class); - // Changing it to 1, as current code checks for multiple scm directories, - // and fail if exists - private final int scmServerCount = 1; - private List serverAddresses; - private List scmServers; - private List mockServers; - private ExecutorService executorService; - private Configuration conf; - private File testRoot; - - @Before - public void setUp() throws Exception { - conf = SCMTestUtils.getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500, - TimeUnit.MILLISECONDS); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - serverAddresses = new ArrayList<>(); - scmServers = new ArrayList<>(); - mockServers = new ArrayList<>(); - for (int x = 0; x < scmServerCount; x++) { - int port = SCMTestUtils.getReuseableAddress().getPort(); - String address = "127.0.0.1"; - serverAddresses.add(address + ":" + port); - ScmTestMock mock = new ScmTestMock(); - scmServers.add(SCMTestUtils.startScmRpcServer(conf, mock, - new InetSocketAddress(address, port), 10)); - mockServers.add(mock); - } - - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, - serverAddresses.toArray(new String[0])); - - String path = GenericTestUtils - .getTempPath(TestDatanodeStateMachine.class.getSimpleName()); - testRoot = new File(path); - if (!testRoot.mkdirs()) { - LOG.info("Required directories {} already exist.", testRoot); - } - - File dataDir = new File(testRoot, "data"); - conf.set(HDDS_DATANODE_DIR_KEY, dataDir.getAbsolutePath()); - if (!dataDir.mkdirs()) { - LOG.info("Data dir create failed."); - } - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - new File(testRoot, "scm").getAbsolutePath()); - path = new File(testRoot, "datanodeID").getAbsolutePath(); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, path); - executorService = HadoopExecutors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Test Data Node State Machine Thread - %d").build()); - } - - @After - public void tearDown() throws Exception { - try { - if (executorService != null) { - executorService.shutdown(); - try { - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - executorService.shutdownNow(); - } - - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - LOG.error("Unable to shutdown properly."); - } - } catch (InterruptedException e) { - LOG.error("Error attempting to shutdown.", e); - executorService.shutdownNow(); - } - } - for (RPC.Server s : scmServers) { - s.stop(); - } - } catch (Exception e) { - //ignore all execption from the shutdown - } finally { - FileUtil.fullyDelete(testRoot); - } - } - - /** - * Assert that starting statemachine executes the Init State. 
- */ - @Test - public void testStartStopDatanodeStateMachine() throws IOException, - InterruptedException, TimeoutException { - try (DatanodeStateMachine stateMachine = - new DatanodeStateMachine(getNewDatanodeDetails(), conf, null, null)) { - stateMachine.startDaemon(); - SCMConnectionManager connectionManager = - stateMachine.getConnectionManager(); - GenericTestUtils.waitFor( - () -> { - int size = connectionManager.getValues().size(); - LOG.info("connectionManager.getValues().size() is {}", size); - return size == 1; - }, 1000, 30000); - - stateMachine.stopDaemon(); - assertTrue(stateMachine.isDaemonStopped()); - } - } - - /** - * This test explores the state machine by invoking each call in sequence just - * like as if the state machine would call it. Because this is a test we are - * able to verify each of the assumptions. - *
- * Here is what happens at High level. - *
- * 1. We start the datanodeStateMachine in the INIT State. - *
- * 2. We invoke the INIT state task. - *
- * 3. That creates a set of RPC endpoints that are ready to connect to SCMs. - *
- * 4. We assert that we have moved to the running state for the - * DatanodeStateMachine. - *
- * 5. We get the task for the Running State - Executing that running state, - * makes the first network call in of the state machine. The Endpoint is in - * the GETVERSION State and we invoke the task. - *
- * 6. We assert that this call was a success by checking that each of the - * endponts now have version response that it got from the SCM server that it - * was talking to and also each of the mock server serviced one RPC call. - *
- * 7. Since the Register is done now, next calls to get task will return - * HeartbeatTask, which sends heartbeats to SCM. We assert that we get right - * task from sub-system below. - * - * @throws IOException - */ - @Test - public void testDatanodeStateContext() throws IOException, - InterruptedException, ExecutionException, TimeoutException { - // There is no mini cluster started in this test, - // create a ID file so that state machine could load a fake datanode ID. - File idPath = new File( - conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR), - OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT); - idPath.delete(); - DatanodeDetails datanodeDetails = getNewDatanodeDetails(); - DatanodeDetails.Port port = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - datanodeDetails.setPort(port); - ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath); - - try (DatanodeStateMachine stateMachine = - new DatanodeStateMachine(datanodeDetails, conf, null, null)) { - DatanodeStateMachine.DatanodeStates currentState = - stateMachine.getContext().getState(); - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT, - currentState); - - DatanodeState task = - stateMachine.getContext().getTask(); - Assert.assertEquals(InitDatanodeState.class, task.getClass()); - - task.execute(executorService); - DatanodeStateMachine.DatanodeStates newState = - task.await(2, TimeUnit.SECONDS); - - for (EndpointStateMachine endpoint : - stateMachine.getConnectionManager().getValues()) { - // We assert that each of the is in State GETVERSION. - Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION, - endpoint.getState()); - } - - // The Datanode has moved into Running State, since endpoints are created. - // We move to running state when we are ready to issue RPC calls to SCMs. - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, - newState); - - // If we had called context.execute instead of calling into each state - // this would have happened automatically. - stateMachine.getContext().setState(newState); - task = stateMachine.getContext().getTask(); - Assert.assertEquals(RunningDatanodeState.class, task.getClass()); - - // This execute will invoke getVersion calls against all SCM endpoints - // that we know of. - - task.execute(executorService); - newState = task.await(10, TimeUnit.SECONDS); - - // Wait for GetVersion call (called by task.execute) to finish. After - // Earlier task.execute called into GetVersion. Wait for the execution - // to finish and the endPointState to move to REGISTER state. - GenericTestUtils.waitFor(() -> { - for (EndpointStateMachine endpoint : - stateMachine.getConnectionManager().getValues()) { - if (endpoint.getState() != - EndpointStateMachine.EndPointStates.REGISTER) { - return false; - } - } - return true; - }, 1000, 50000); - - // If we are in running state, we should be in running. - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, - newState); - - for (EndpointStateMachine endpoint : - stateMachine.getConnectionManager().getValues()) { - - // Since the earlier task.execute called into GetVersion, the - // endPointState Machine should move to REGISTER state. - Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER, - endpoint.getState()); - - // We assert that each of the end points have gotten a version from the - // SCM Server. 
- Assert.assertNotNull(endpoint.getVersion()); - } - - // We can also assert that all mock servers have received only one RPC - // call at this point of time. - for (ScmTestMock mock : mockServers) { - Assert.assertEquals(1, mock.getRpcCount()); - } - - // This task is the Running task, but running task executes tasks based - // on the state of Endpoints, hence this next call will be a Register at - // the endpoint RPC level. - task = stateMachine.getContext().getTask(); - task.execute(executorService); - newState = task.await(2, TimeUnit.SECONDS); - - // If we are in running state, we should be in running. - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, - newState); - - for (ScmTestMock mock : mockServers) { - Assert.assertEquals(2, mock.getRpcCount()); - } - - // This task is the Running task, but running task executes tasks based - // on the state of Endpoints, hence this next call will be a - // HeartbeatTask at the endpoint RPC level. - task = stateMachine.getContext().getTask(); - task.execute(executorService); - newState = task.await(2, TimeUnit.SECONDS); - - // If we are in running state, we should be in running. - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, - newState); - - - for (ScmTestMock mock : mockServers) { - Assert.assertEquals(1, mock.getHeartbeatCount()); - } - } - } - - @Test - public void testDatanodeStateMachineWithIdWriteFail() throws Exception { - - File idPath = new File( - conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR), - OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT); - idPath.delete(); - DatanodeDetails datanodeDetails = getNewDatanodeDetails(); - DatanodeDetails.Port port = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - datanodeDetails.setPort(port); - - try (DatanodeStateMachine stateMachine = - new DatanodeStateMachine(datanodeDetails, conf, null, null)) { - DatanodeStateMachine.DatanodeStates currentState = - stateMachine.getContext().getState(); - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT, - currentState); - - DatanodeState task = - stateMachine.getContext().getTask(); - Assert.assertEquals(InitDatanodeState.class, task.getClass()); - - //Set the idPath to read only, state machine will fail to write - // datanodeId file and set the state to shutdown. - idPath.getParentFile().mkdirs(); - idPath.getParentFile().setReadOnly(); - - task.execute(executorService); - DatanodeStateMachine.DatanodeStates newState = - task.await(2, TimeUnit.SECONDS); - - //As, we have changed the permission of idPath to readable, writing - // will fail and it will set the state to shutdown. - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN, - newState); - - //Setting back to writable. - idPath.getParentFile().setWritable(true); - } - } - - /** - * Test state transition with a list of invalid scm configurations, - * and verify the state transits to SHUTDOWN each time. 
- */ - @Test - public void testDatanodeStateMachineWithInvalidConfiguration() - throws Exception { - List> confList = - new ArrayList<>(); - confList.add(Maps.immutableEntry(ScmConfigKeys.OZONE_SCM_NAMES, "")); - - // Invalid ozone.scm.names - /** Empty **/ - confList.add(Maps.immutableEntry( - ScmConfigKeys.OZONE_SCM_NAMES, "")); - /** Invalid schema **/ - confList.add(Maps.immutableEntry( - ScmConfigKeys.OZONE_SCM_NAMES, "x..y")); - /** Invalid port **/ - confList.add(Maps.immutableEntry( - ScmConfigKeys.OZONE_SCM_NAMES, "scm:xyz")); - /** Port out of range **/ - confList.add(Maps.immutableEntry( - ScmConfigKeys.OZONE_SCM_NAMES, "scm:123456")); - // Invalid ozone.scm.datanode.id.dir - /** Empty **/ - confList.add(Maps.immutableEntry( - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, "")); - - confList.forEach((entry) -> { - Configuration perTestConf = new Configuration(conf); - perTestConf.setStrings(entry.getKey(), entry.getValue()); - LOG.info("Test with {} = {}", entry.getKey(), entry.getValue()); - try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( - getNewDatanodeDetails(), perTestConf, null, null)) { - DatanodeStateMachine.DatanodeStates currentState = - stateMachine.getContext().getState(); - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT, - currentState); - DatanodeState task = - stateMachine.getContext().getTask(); - task.execute(executorService); - DatanodeStateMachine.DatanodeStates newState = - task.await(2, TimeUnit.SECONDS); - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN, - newState); - } catch (Exception e) { - Assert.fail("Unexpected exception found"); - } - }); - } - - private DatanodeDetails getNewDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - return DatanodeDetails.newBuilder() - .setUuid(UUID.randomUUID().toString()) - .setHostName("localhost") - .setIpAddress("127.0.0.1") - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort) - .build(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java deleted file mode 100644 index c6fa8d62102ff..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - -/** - * This class is used to test the KeyValueContainerData. - */ -public class TestKeyValueContainerData { - - private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5); - @Test - public void testKeyValueData() { - long containerId = 1L; - ContainerProtos.ContainerType containerType = ContainerProtos - .ContainerType.KeyValueContainer; - String path = "/tmp"; - String containerDBType = "RocksDB"; - ContainerProtos.ContainerDataProto.State state = - ContainerProtos.ContainerDataProto.State.CLOSED; - AtomicLong val = new AtomicLong(0); - UUID pipelineId = UUID.randomUUID(); - UUID datanodeId = UUID.randomUUID(); - - KeyValueContainerData kvData = new KeyValueContainerData(containerId, - MAXSIZE, pipelineId.toString(), datanodeId.toString()); - - assertEquals(containerType, kvData.getContainerType()); - assertEquals(containerId, kvData.getContainerID()); - assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData - .getState()); - assertEquals(0, kvData.getMetadata().size()); - assertEquals(0, kvData.getNumPendingDeletionBlocks()); - assertEquals(val.get(), kvData.getReadBytes()); - assertEquals(val.get(), kvData.getWriteBytes()); - assertEquals(val.get(), kvData.getReadCount()); - assertEquals(val.get(), kvData.getWriteCount()); - assertEquals(val.get(), kvData.getKeyCount()); - assertEquals(val.get(), kvData.getNumPendingDeletionBlocks()); - assertEquals(MAXSIZE, kvData.getMaxSize()); - - kvData.setState(state); - kvData.setContainerDBType(containerDBType); - kvData.setChunksPath(path); - kvData.setMetadataPath(path); - kvData.incrReadBytes(10); - kvData.incrWriteBytes(10); - kvData.incrReadCount(); - kvData.incrWriteCount(); - kvData.incrKeyCount(); - kvData.incrPendingDeletionBlocks(1); - - assertEquals(state, kvData.getState()); - assertEquals(containerDBType, kvData.getContainerDBType()); - assertEquals(path, kvData.getChunksPath()); - assertEquals(path, kvData.getMetadataPath()); - - assertEquals(10, kvData.getReadBytes()); - assertEquals(10, kvData.getWriteBytes()); - assertEquals(1, kvData.getReadCount()); - assertEquals(1, kvData.getWriteCount()); - assertEquals(1, kvData.getKeyCount()); - assertEquals(1, kvData.getNumPendingDeletionBlocks()); - assertEquals(pipelineId.toString(), kvData.getOriginPipelineId()); - assertEquals(datanodeId.toString(), kvData.getOriginNodeId()); - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java deleted file mode 100644 index 58892227a6576..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.hadoop.ozone.common.InconsistentStorageStateException; -import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -import static org.junit.Assert.*; - -/** - * This class tests {@link DatanodeVersionFile}. - */ -public class TestDatanodeVersionFile { - - private File versionFile; - private DatanodeVersionFile dnVersionFile; - private Properties properties; - - private String storageID; - private String clusterID; - private String datanodeUUID; - private long cTime; - private int lv; - - @Rule - public TemporaryFolder folder= new TemporaryFolder(); - - @Before - public void setup() throws IOException { - versionFile = folder.newFile("Version"); - storageID = UUID.randomUUID().toString(); - clusterID = UUID.randomUUID().toString(); - datanodeUUID = UUID.randomUUID().toString(); - cTime = Time.now(); - lv = DataNodeLayoutVersion.getLatestVersion().getVersion(); - - dnVersionFile = new DatanodeVersionFile( - storageID, clusterID, datanodeUUID, cTime, lv); - - dnVersionFile.createVersionFile(versionFile); - - properties = dnVersionFile.readFrom(versionFile); - } - - @Test - public void testCreateAndReadVersionFile() throws IOException{ - - //Check VersionFile exists - assertTrue(versionFile.exists()); - - assertEquals(storageID, HddsVolumeUtil.getStorageID( - properties, versionFile)); - assertEquals(clusterID, HddsVolumeUtil.getClusterID( - properties, versionFile, clusterID)); - assertEquals(datanodeUUID, HddsVolumeUtil.getDatanodeUUID( - properties, versionFile, datanodeUUID)); - assertEquals(cTime, HddsVolumeUtil.getCreationTime( - properties, versionFile)); - assertEquals(lv, HddsVolumeUtil.getLayOutVersion( - properties, versionFile)); - } - - @Test - public void testIncorrectClusterId() throws IOException{ - try { - String randomClusterID = UUID.randomUUID().toString(); - HddsVolumeUtil.getClusterID(properties, versionFile, - randomClusterID); - fail("Test failure in testIncorrectClusterId"); - } catch (InconsistentStorageStateException ex) { - GenericTestUtils.assertExceptionContains("Mismatched ClusterIDs", ex); - } - } - - @Test - public void testVerifyCTime() throws IOException{ - long invalidCTime = -10; - dnVersionFile = new DatanodeVersionFile( - storageID, clusterID, datanodeUUID, invalidCTime, lv); - dnVersionFile.createVersionFile(versionFile); - properties = dnVersionFile.readFrom(versionFile); - - try { - HddsVolumeUtil.getCreationTime(properties, versionFile); - fail("Test failure in testVerifyCTime"); - } catch (InconsistentStorageStateException ex) { - GenericTestUtils.assertExceptionContains("Invalid Creation time in " + - "Version File : " + versionFile, ex); - } - } - - @Test - public void testVerifyLayOut() throws IOException{ - int invalidLayOutVersion = 100; - dnVersionFile = 
new DatanodeVersionFile( - storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion); - dnVersionFile.createVersionFile(versionFile); - Properties props = dnVersionFile.readFrom(versionFile); - - try { - HddsVolumeUtil.getLayOutVersion(props, versionFile); - fail("Test failure in testVerifyLayOut"); - } catch (InconsistentStorageStateException ex) { - GenericTestUtils.assertExceptionContains("Invalid layOutVersion.", ex); - } - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java deleted file mode 100644 index c611ccb28e7e1..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileSystemTestHelper; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.UUID; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * This class tests create/read .container files. - */ -public class TestContainerDataYaml { - - private static long testContainerID = 1234; - - private static String testRoot = new FileSystemTestHelper().getTestRootDir(); - - private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5); - - /** - * Creates a .container file. cleanup() should be called at the end of the - * test when container file is created. 
- */ - private File createContainerFile(long containerID) throws IOException { - new File(testRoot).mkdirs(); - - String containerPath = containerID + ".container"; - - KeyValueContainerData keyValueContainerData = new KeyValueContainerData( - containerID, MAXSIZE, UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - keyValueContainerData.setContainerDBType("RocksDB"); - keyValueContainerData.setMetadataPath(testRoot); - keyValueContainerData.setChunksPath(testRoot); - - File containerFile = new File(testRoot, containerPath); - - // Create .container file with ContainerData - ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType - .KeyValueContainer, keyValueContainerData, containerFile); - - //Check .container file exists or not. - assertTrue(containerFile.exists()); - - return containerFile; - } - - private void cleanup() { - FileUtil.fullyDelete(new File(testRoot)); - } - - @Test - public void testCreateContainerFile() throws IOException { - long containerID = testContainerID++; - - File containerFile = createContainerFile(containerID); - - // Read from .container file, and verify data. - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertEquals(containerID, kvData.getContainerID()); - assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData - .getContainerType()); - assertEquals("RocksDB", kvData.getContainerDBType()); - assertEquals(containerFile.getParent(), kvData.getMetadataPath()); - assertEquals(containerFile.getParent(), kvData.getChunksPath()); - assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData - .getState()); - assertEquals(1, kvData.getLayOutVersion()); - assertEquals(0, kvData.getMetadata().size()); - assertEquals(MAXSIZE, kvData.getMaxSize()); - - // Update ContainerData. - kvData.addMetadata("VOLUME", "hdfs"); - kvData.addMetadata("OWNER", "ozone"); - kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED); - - - ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType - .KeyValueContainer, kvData, containerFile); - - // Reading newly updated data from .container file - kvData = (KeyValueContainerData) ContainerDataYaml.readContainerFile( - containerFile); - - // verify data. 
- assertEquals(containerID, kvData.getContainerID()); - assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData - .getContainerType()); - assertEquals("RocksDB", kvData.getContainerDBType()); - assertEquals(containerFile.getParent(), kvData.getMetadataPath()); - assertEquals(containerFile.getParent(), kvData.getChunksPath()); - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData - .getState()); - assertEquals(1, kvData.getLayOutVersion()); - assertEquals(2, kvData.getMetadata().size()); - assertEquals("hdfs", kvData.getMetadata().get("VOLUME")); - assertEquals("ozone", kvData.getMetadata().get("OWNER")); - assertEquals(MAXSIZE, kvData.getMaxSize()); - } - - @Test - public void testIncorrectContainerFile() throws IOException{ - try { - String containerFile = "incorrect.container"; - //Get file from resources folder - ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource(containerFile).getFile()); - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(file); - fail("testIncorrectContainerFile failed"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("No enum constant", ex); - } - } - - - @Test - public void testCheckBackWardCompatibilityOfContainerFile() throws - IOException { - // This test is for if we upgrade, and then .container files added by new - // server will have new fields added to .container file, after a while we - // decided to rollback. Then older ozone can read .container files - // created or not. - - try { - String containerFile = "additionalfields.container"; - //Get file from resources folder - ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource(containerFile).getFile()); - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(file); - ContainerUtils.verifyChecksum(kvData); - - //Checking the Container file data is consistent or not - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData - .getState()); - assertEquals("RocksDB", kvData.getContainerDBType()); - assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData - .getContainerType()); - assertEquals(9223372036854775807L, kvData.getContainerID()); - assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData - .getChunksPath()); - assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData - .getMetadataPath()); - assertEquals(1, kvData.getLayOutVersion()); - assertEquals(2, kvData.getMetadata().size()); - - } catch (Exception ex) { - ex.printStackTrace(); - fail("testCheckBackWardCompatibilityOfContainerFile failed"); - } - } - - /** - * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}. - */ - @Test - public void testChecksumInContainerFile() throws IOException { - long containerID = testContainerID++; - - File containerFile = createContainerFile(containerID); - - // Read from .container file, and verify data. - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - ContainerUtils.verifyChecksum(kvData); - - cleanup(); - } - - /** - * Test to verify incorrect checksum is detected. 
- */ - @Test - public void testIncorrectChecksum() { - try { - String containerFile = "incorrect.checksum.container"; - //Get file from resources folder - ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource(containerFile).getFile()); - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(file); - ContainerUtils.verifyChecksum(kvData); - fail("testIncorrectChecksum failed"); - } catch (Exception ex) { - GenericTestUtils.assertExceptionContains("Container checksum error for " + - "ContainerID:", ex); - } - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java deleted file mode 100644 index e1e7119727b30..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.interfaces.Container; - -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Class used to test ContainerSet operations. - */ -public class TestContainerSet { - - @Test - public void testAddGetRemoveContainer() throws StorageContainerException { - ContainerSet containerSet = new ContainerSet(); - long containerId = 100L; - ContainerProtos.ContainerDataProto.State state = ContainerProtos - .ContainerDataProto.State.CLOSED; - - KeyValueContainerData kvData = new KeyValueContainerData(containerId, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - kvData.setState(state); - KeyValueContainer keyValueContainer = new KeyValueContainer(kvData, new - OzoneConfiguration()); - - //addContainer - boolean result = containerSet.addContainer(keyValueContainer); - assertTrue(result); - try { - result = containerSet.addContainer(keyValueContainer); - fail("Adding same container ID twice should fail."); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("Container already exists with" + - " container Id " + containerId, ex); - } - - //getContainer - KeyValueContainer container = (KeyValueContainer) containerSet - .getContainer(containerId); - KeyValueContainerData keyValueContainerData = (KeyValueContainerData) - container.getContainerData(); - assertEquals(containerId, keyValueContainerData.getContainerID()); - assertEquals(state, keyValueContainerData.getState()); - assertNull(containerSet.getContainer(1000L)); - - //removeContainer - assertTrue(containerSet.removeContainer(containerId)); - assertFalse(containerSet.removeContainer(1000L)); - } - - @Test - public void testIteratorsAndCount() throws StorageContainerException { - - ContainerSet containerSet = createContainerSet(); - - assertEquals(10, containerSet.containerCount()); - - Iterator> iterator = containerSet.getContainerIterator(); - - int count = 0; - while(iterator.hasNext()) { - Container kv = iterator.next(); - ContainerData containerData = kv.getContainerData(); - long containerId = containerData.getContainerID(); - if 
(containerId%2 == 0) { - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, - containerData.getState()); - } else { - assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, - containerData.getState()); - } - count++; - } - assertEquals(10, count); - - //Using containerMapIterator. - Iterator>> containerMapIterator = containerSet - .getContainerMapIterator(); - - count = 0; - while (containerMapIterator.hasNext()) { - Container kv = containerMapIterator.next().getValue(); - ContainerData containerData = kv.getContainerData(); - long containerId = containerData.getContainerID(); - if (containerId%2 == 0) { - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, - containerData.getState()); - } else { - assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, - containerData.getState()); - } - count++; - } - assertEquals(10, count); - - } - - @Test - public void testIteratorPerVolume() throws StorageContainerException { - HddsVolume vol1 = Mockito.mock(HddsVolume.class); - Mockito.when(vol1.getStorageID()).thenReturn("uuid-1"); - HddsVolume vol2 = Mockito.mock(HddsVolume.class); - Mockito.when(vol2.getStorageID()).thenReturn("uuid-2"); - - ContainerSet containerSet = new ContainerSet(); - for (int i=0; i<10; i++) { - KeyValueContainerData kvData = new KeyValueContainerData(i, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - if (i%2 == 0) { - kvData.setVolume(vol1); - } else { - kvData.setVolume(vol2); - } - kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED); - KeyValueContainer kv = new KeyValueContainer(kvData, new - OzoneConfiguration()); - containerSet.addContainer(kv); - } - - Iterator> iter1 = containerSet.getContainerIterator(vol1); - int count1 = 0; - while (iter1.hasNext()) { - Container c = iter1.next(); - assertEquals(0, (c.getContainerData().getContainerID() % 2)); - count1++; - } - assertEquals(5, count1); - - Iterator> iter2 = containerSet.getContainerIterator(vol2); - int count2 = 0; - while (iter2.hasNext()) { - Container c = iter2.next(); - assertEquals(1, (c.getContainerData().getContainerID() % 2)); - count2++; - } - assertEquals(5, count2); - } - - @Test - public void testGetContainerReport() throws IOException { - - ContainerSet containerSet = createContainerSet(); - - ContainerReportsProto containerReportsRequestProto = containerSet - .getContainerReport(); - - assertEquals(10, containerReportsRequestProto.getReportsList().size()); - } - - - - @Test - public void testListContainer() throws StorageContainerException { - ContainerSet containerSet = createContainerSet(); - - List result = new ArrayList<>(); - containerSet.listContainer(2, 5, result); - - assertEquals(5, result.size()); - - for(ContainerData containerData : result) { - assertTrue(containerData.getContainerID() >=2 && containerData - .getContainerID()<=6); - } - } - - private ContainerSet createContainerSet() throws StorageContainerException { - ContainerSet containerSet = new ContainerSet(); - for (int i=0; i<10; i++) { - KeyValueContainerData kvData = new KeyValueContainerData(i, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - if (i%2 == 0) { - kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED); - } else { - kvData.setState(ContainerProtos.ContainerDataProto.State.OPEN); - } - KeyValueContainer kv = new KeyValueContainer(kvData, new - OzoneConfiguration()); - containerSet.addContainer(kv); - } - return containerSet; - } - -} diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java deleted file mode 100644 index fe27eeb02d6b1..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ /dev/null @@ -1,300 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.collect.Maps; -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto - .ContainerProtos.ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto - .ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .WriteChunkRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.File; -import java.io.IOException; -import java.util.Map; -import java.util.UUID; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.times; -import 
static org.mockito.Mockito.verify; - -/** - * Test-cases to verify the functionality of HddsDispatcher. - */ -public class TestHddsDispatcher { - - @Test - public void testContainerCloseActionWhenFull() throws IOException { - String testDir = GenericTestUtils.getTempPath( - TestHddsDispatcher.class.getSimpleName()); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - DatanodeDetails dd = randomDatanodeDetails(); - VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf); - - try { - UUID scmId = UUID.randomUUID(); - ContainerSet containerSet = new ContainerSet(); - - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd); - Mockito.when(context.getParent()).thenReturn(stateMachine); - KeyValueContainerData containerData = new KeyValueContainerData(1L, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), - dd.getUuidString()); - Container container = new KeyValueContainer(containerData, conf); - container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), - scmId.toString()); - containerSet.addContainer(container); - ContainerMetrics metrics = ContainerMetrics.create(conf); - Map handlers = Maps.newHashMap(); - for (ContainerType containerType : ContainerType.values()) { - handlers.put(containerType, - Handler.getHandlerForContainerType(containerType, conf, context, - containerSet, volumeSet, metrics)); - } - HddsDispatcher hddsDispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, context, metrics); - hddsDispatcher.setScmId(scmId.toString()); - ContainerCommandResponseProto responseOne = hddsDispatcher - .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - responseOne.getResult()); - verify(context, times(0)) - .addContainerActionIfAbsent(Mockito.any(ContainerAction.class)); - containerData.setBytesUsed(Double.valueOf( - StorageUnit.MB.toBytes(950)).longValue()); - ContainerCommandResponseProto responseTwo = hddsDispatcher - .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - responseTwo.getResult()); - verify(context, times(1)) - .addContainerActionIfAbsent(Mockito.any(ContainerAction.class)); - - } finally { - volumeSet.shutdown(); - FileUtils.deleteDirectory(new File(testDir)); - } - - } - - @Test - public void testCreateContainerWithWriteChunk() throws IOException { - String testDir = - GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName()); - try { - UUID scmId = UUID.randomUUID(); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - DatanodeDetails dd = randomDatanodeDetails(); - HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); - ContainerCommandRequestProto writeChunkRequest = - getWriteChunkRequest(dd.getUuidString(), 1L, 1L); - // send read chunk request and make sure container does not exist - ContainerCommandResponseProto response = - hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null); - Assert.assertEquals(response.getResult(), - ContainerProtos.Result.CONTAINER_NOT_FOUND); - // send write chunk request without sending create container - response = hddsDispatcher.dispatch(writeChunkRequest, null); - // container should be created as part of write chunk request - 
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - // send read chunk request to read the chunk written above - response = - hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - Assert.assertEquals(response.getReadChunk().getData(), - writeChunkRequest.getWriteChunk().getData()); - } finally { - FileUtils.deleteDirectory(new File(testDir)); - } - } - - @Test - public void testWriteChunkWithCreateContainerFailure() throws IOException { - String testDir = GenericTestUtils.getTempPath( - TestHddsDispatcher.class.getSimpleName()); - try { - UUID scmId = UUID.randomUUID(); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - DatanodeDetails dd = randomDatanodeDetails(); - HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); - ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest( - dd.getUuidString(), 1L, 1L); - - HddsDispatcher mockDispatcher = Mockito.spy(hddsDispatcher); - ContainerCommandResponseProto.Builder builder = ContainerUtils - .getContainerCommandResponse(writeChunkRequest, - ContainerProtos.Result.DISK_OUT_OF_SPACE, ""); - // Return DISK_OUT_OF_SPACE response when writing chunk - // with container creation. - Mockito.doReturn(builder.build()).when(mockDispatcher) - .createContainer(writeChunkRequest); - - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(HddsDispatcher.LOG); - // send write chunk request without sending create container - mockDispatcher.dispatch(writeChunkRequest, null); - // verify the error log - assertTrue(logCapturer.getOutput() - .contains("ContainerID " + writeChunkRequest.getContainerID() - + " creation failed : Result: DISK_OUT_OF_SPACE")); - } finally { - FileUtils.deleteDirectory(new File(testDir)); - } - } - - /** - * Creates HddsDispatcher instance with given infos. - * @param dd datanode detail info. - * @param scmId UUID of scm id. - * @param conf configuration be used. - * @return HddsDispatcher HddsDispatcher instance. - * @throws IOException - */ - private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, - OzoneConfiguration conf) throws IOException { - ContainerSet containerSet = new ContainerSet(); - VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf); - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd); - Mockito.when(context.getParent()).thenReturn(stateMachine); - ContainerMetrics metrics = ContainerMetrics.create(conf); - Map handlers = Maps.newHashMap(); - for (ContainerType containerType : ContainerType.values()) { - handlers.put(containerType, - Handler.getHandlerForContainerType(containerType, conf, context, - containerSet, volumeSet, metrics)); - } - - HddsDispatcher hddsDispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, context, metrics); - hddsDispatcher.setScmId(scmId.toString()); - return hddsDispatcher; - } - - // This method has to be removed once we move scm/TestUtils.java - // from server-scm project to container-service or to common project. 
- private static DatanodeDetails randomDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(UUID.randomUUID().toString()) - .setHostName("localhost") - .setIpAddress("127.0.0.1") - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort); - return builder.build(); - } - - private ContainerCommandRequestProto getWriteChunkRequest( - String datanodeId, Long containerId, Long localId) { - - ByteString data = ByteString.copyFrom( - UUID.randomUUID().toString().getBytes(UTF_8)); - ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo - .newBuilder() - .setChunkName( - DigestUtils.md5Hex("dummy-key") + "_stream_" - + containerId + "_chunk_" + localId) - .setOffset(0) - .setLen(data.size()) - .setChecksumData(Checksum.getNoChecksumDataProto()) - .build(); - - WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto - .newBuilder() - .setBlockID(new BlockID(containerId, localId) - .getDatanodeBlockIDProtobuf()) - .setChunkData(chunk) - .setData(data); - - return ContainerCommandRequestProto - .newBuilder() - .setContainerID(containerId) - .setCmdType(ContainerProtos.Type.WriteChunk) - .setDatanodeUuid(datanodeId) - .setWriteChunk(writeChunkRequest) - .build(); - } - - /** - * Creates container read chunk request using input container write chunk - * request. - * - * @param writeChunkRequest - Input container write chunk request - * @return container read chunk request - */ - private ContainerCommandRequestProto getReadChunkRequest( - ContainerCommandRequestProto writeChunkRequest) { - WriteChunkRequestProto writeChunk = writeChunkRequest.getWriteChunk(); - ContainerProtos.ReadChunkRequestProto.Builder readChunkRequest = - ContainerProtos.ReadChunkRequestProto.newBuilder() - .setBlockID(writeChunk.getBlockID()) - .setChunkData(writeChunk.getChunkData()); - return ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.ReadChunk) - .setContainerID(writeChunk.getBlockID().getContainerID()) - .setTraceID(writeChunkRequest.getTraceID()) - .setDatanodeUuid(writeChunkRequest.getDatanodeUuid()) - .setReadChunk(readChunkRequest) - .build(); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java deleted file mode 100644 index 07c78c04989ef..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Datanode container related test-cases. - */ -package org.apache.hadoop.ozone.container.common.impl; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java deleted file mode 100644 index a6ba103174e6c..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - -import com.google.common.collect.Maps; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.mockito.Mockito; - -import java.util.Map; - -/** - * Tests Handler interface. 
- */ -public class TestHandler { - @Rule - public TestRule timeout = new Timeout(300000); - - private Configuration conf; - private HddsDispatcher dispatcher; - private ContainerSet containerSet; - private VolumeSet volumeSet; - private Handler handler; - - @Before - public void setup() throws Exception { - this.conf = new Configuration(); - this.containerSet = Mockito.mock(ContainerSet.class); - this.volumeSet = Mockito.mock(VolumeSet.class); - DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class); - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - ContainerMetrics metrics = ContainerMetrics.create(conf); - Map handlers = Maps.newHashMap(); - for (ContainerProtos.ContainerType containerType : - ContainerProtos.ContainerType.values()) { - handlers.put(containerType, - Handler.getHandlerForContainerType( - containerType, conf, context, containerSet, volumeSet, metrics)); - } - this.dispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, null, metrics); - } - - @Test - public void testGetKeyValueHandler() throws Exception { - Handler kvHandler = dispatcher.getHandler( - ContainerProtos.ContainerType.KeyValueContainer); - - Assert.assertTrue("getHandlerForContainerType returned incorrect handler", - (kvHandler instanceof KeyValueHandler)); - } - - @Test - public void testGetHandlerForInvalidContainerType() { - // When new ContainerProtos.ContainerType are added, increment the code - // for invalid enum. - ContainerProtos.ContainerType invalidContainerType = - ContainerProtos.ContainerType.forNumber(2); - - Assert.assertEquals("New ContainerType detected. Not an invalid " + - "containerType", invalidContainerType, null); - - Handler dispatcherHandler = dispatcher.getHandler(invalidContainerType); - Assert.assertEquals("Get Handler for Invalid ContainerType should " + - "return null.", dispatcherHandler, null); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java deleted file mode 100644 index ca3d29dada199..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * SCM Testing and Mocking Utils. 
- */ -package org.apache.hadoop.ozone.container.common; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java deleted file mode 100644 index aae388dd5a180..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.concurrent.ScheduledExecutorService; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -/** - * Test cases to test {@link ReportManager}. - */ -public class TestReportManager { - - @Test - public void testReportManagerInit() { - Configuration conf = new OzoneConfiguration(); - StateContext dummyContext = Mockito.mock(StateContext.class); - ReportPublisher dummyPublisher = Mockito.mock(ReportPublisher.class); - ReportManager.Builder builder = ReportManager.newBuilder(conf); - builder.setStateContext(dummyContext); - builder.addPublisher(dummyPublisher); - ReportManager reportManager = builder.build(); - reportManager.init(); - verify(dummyPublisher, times(1)).init(eq(dummyContext), - any(ScheduledExecutorService.class)); - - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java deleted file mode 100644 index 03f0cd4d816b7..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java +++ /dev/null @@ -1,191 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.protobuf.GeneratedMessage; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -/** - * Test cases to test {@link ReportPublisher}. - */ -public class TestReportPublisher { - - private static Configuration config; - - @BeforeClass - public static void setup() { - config = new OzoneConfiguration(); - } - - /** - * Dummy report publisher for testing. 
- */ - private static class DummyReportPublisher extends ReportPublisher { - - private final long frequency; - private int getReportCount = 0; - - DummyReportPublisher(long frequency) { - this.frequency = frequency; - } - - @Override - protected long getReportFrequency() { - return frequency; - } - - @Override - protected GeneratedMessage getReport() { - getReportCount++; - return null; - } - } - - @Test - public void testReportPublisherInit() { - ReportPublisher publisher = new DummyReportPublisher(0); - StateContext dummyContext = Mockito.mock(StateContext.class); - ScheduledExecutorService dummyExecutorService = Mockito.mock( - ScheduledExecutorService.class); - publisher.init(dummyContext, dummyExecutorService); - verify(dummyExecutorService, times(1)).schedule(publisher, - 0, TimeUnit.MILLISECONDS); - } - - @Test - public void testScheduledReport() throws InterruptedException { - ReportPublisher publisher = new DummyReportPublisher(100); - StateContext dummyContext = Mockito.mock(StateContext.class); - ScheduledExecutorService executorService = HadoopExecutors - .newScheduledThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Unit test ReportManager Thread - %d").build()); - publisher.init(dummyContext, executorService); - Thread.sleep(150); - Assert.assertEquals(1, ((DummyReportPublisher) publisher).getReportCount); - Thread.sleep(100); - Assert.assertEquals(2, ((DummyReportPublisher) publisher).getReportCount); - executorService.shutdown(); - } - - @Test - public void testPublishReport() throws InterruptedException { - ReportPublisher publisher = new DummyReportPublisher(100); - StateContext dummyContext = Mockito.mock(StateContext.class); - ScheduledExecutorService executorService = HadoopExecutors - .newScheduledThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Unit test ReportManager Thread - %d").build()); - publisher.init(dummyContext, executorService); - Thread.sleep(150); - executorService.shutdown(); - Assert.assertEquals(1, ((DummyReportPublisher) publisher).getReportCount); - verify(dummyContext, times(1)).addReport(null); - - } - - @Test - public void testCommandStatusPublisher() throws InterruptedException { - StateContext dummyContext = Mockito.mock(StateContext.class); - ReportPublisher publisher = new CommandStatusReportPublisher(); - final Map cmdStatusMap = new ConcurrentHashMap<>(); - when(dummyContext.getCommandStatusMap()).thenReturn(cmdStatusMap); - publisher.setConf(config); - - ScheduledExecutorService executorService = HadoopExecutors - .newScheduledThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Unit test ReportManager Thread - %d").build()); - publisher.init(dummyContext, executorService); - Assert.assertNull(((CommandStatusReportPublisher) publisher).getReport()); - - // Insert to status object to state context map and then get the report. - CommandStatus obj1 = CommandStatus.CommandStatusBuilder.newBuilder() - .setCmdId(HddsIdFactory.getLongId()) - .setType(Type.deleteBlocksCommand) - .setStatus(Status.PENDING) - .build(); - CommandStatus obj2 = CommandStatus.CommandStatusBuilder.newBuilder() - .setCmdId(HddsIdFactory.getLongId()) - .setType(Type.closeContainerCommand) - .setStatus(Status.EXECUTED) - .build(); - cmdStatusMap.put(obj1.getCmdId(), obj1); - cmdStatusMap.put(obj2.getCmdId(), obj2); - // We are not sending the commands whose status is PENDING. 
- Assert.assertEquals("Should publish report with 2 status objects", 1, - ((CommandStatusReportPublisher) publisher).getReport() - .getCmdStatusCount()); - executorService.shutdown(); - } - - /** - * Get a datanode details. - * - * @return DatanodeDetails - */ - private static DatanodeDetails getDatanodeDetails() { - String uuid = UUID.randomUUID().toString(); - Random random = new Random(); - String ipAddress = - random.nextInt(256) + "." + random.nextInt(256) + "." + random - .nextInt(256) + "." + random.nextInt(256); - - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(uuid) - .setHostName("localhost") - .setIpAddress(ipAddress) - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort); - return builder.build(); - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java deleted file mode 100644 index f8c5fe5e27580..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -/** - * Test cases to test ReportPublisherFactory. 
- */ -public class TestReportPublisherFactory { - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Test - public void testGetContainerReportPublisher() { - Configuration conf = new OzoneConfiguration(); - ReportPublisherFactory factory = new ReportPublisherFactory(conf); - ReportPublisher publisher = factory - .getPublisherFor(ContainerReportsProto.class); - Assert.assertEquals(ContainerReportPublisher.class, publisher.getClass()); - Assert.assertEquals(conf, publisher.getConf()); - } - - @Test - public void testGetNodeReportPublisher() { - Configuration conf = new OzoneConfiguration(); - ReportPublisherFactory factory = new ReportPublisherFactory(conf); - ReportPublisher publisher = factory - .getPublisherFor(NodeReportProto.class); - Assert.assertEquals(NodeReportPublisher.class, publisher.getClass()); - Assert.assertEquals(conf, publisher.getConf()); - } - - @Test - public void testInvalidReportPublisher() { - Configuration conf = new OzoneConfiguration(); - ReportPublisherFactory factory = new ReportPublisherFactory(conf); - exception.expect(RuntimeException.class); - exception.expectMessage("No publisher found for report"); - factory.getPublisherFor(HddsProtos.DatanodeDetailsProto.class); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java deleted file mode 100644 index 37615bc7536b6..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.report; -/** - * This package has test cases for all the report publishers which generates - * reports that are sent to SCM via heartbeat. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java deleted file mode 100644 index a92f236138209..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.UUID; - -import static java.util.Collections.singletonMap; -import static org.apache.hadoop.ozone.OzoneConsts.GB; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -/** - * Test cases to verify CloseContainerCommandHandler in datanode. 
- */ -public class TestCloseContainerCommandHandler { - - private static final long CONTAINER_ID = 123L; - - private OzoneContainer ozoneContainer; - private StateContext context; - private XceiverServerSpi writeChannel; - private Container container; - private Handler containerHandler; - private PipelineID pipelineID; - private PipelineID nonExistentPipelineID = PipelineID.randomId(); - - private CloseContainerCommandHandler subject = - new CloseContainerCommandHandler(); - - @Before - public void before() throws Exception { - context = mock(StateContext.class); - DatanodeStateMachine dnStateMachine = mock(DatanodeStateMachine.class); - when(dnStateMachine.getDatanodeDetails()) - .thenReturn(randomDatanodeDetails()); - when(context.getParent()).thenReturn(dnStateMachine); - - pipelineID = PipelineID.randomId(); - - KeyValueContainerData data = new KeyValueContainerData(CONTAINER_ID, GB, - pipelineID.getId().toString(), null); - - container = new KeyValueContainer(data, new OzoneConfiguration()); - ContainerSet containerSet = new ContainerSet(); - containerSet.addContainer(container); - - containerHandler = mock(Handler.class); - ContainerController controller = new ContainerController(containerSet, - singletonMap(ContainerProtos.ContainerType.KeyValueContainer, - containerHandler)); - - writeChannel = mock(XceiverServerSpi.class); - ozoneContainer = mock(OzoneContainer.class); - when(ozoneContainer.getController()).thenReturn(controller); - when(ozoneContainer.getContainerSet()).thenReturn(containerSet); - when(ozoneContainer.getWriteChannel()).thenReturn(writeChannel); - when(writeChannel.isExist(pipelineID.getProtobuf())).thenReturn(true); - when(writeChannel.isExist(nonExistentPipelineID.getProtobuf())) - .thenReturn(false); - } - - @Test - public void closeContainerWithPipeline() throws Exception { - // close a container that's associated with an existing pipeline - subject.handle(closeWithKnownPipeline(), ozoneContainer, context, null); - - verify(containerHandler) - .markContainerForClose(container); - verify(writeChannel) - .submitRequest(any(), eq(pipelineID.getProtobuf())); - verify(containerHandler, never()) - .quasiCloseContainer(container); - } - - @Test - public void closeContainerWithoutPipeline() throws IOException { - // close a container that's NOT associated with an open pipeline - subject.handle(closeWithUnknownPipeline(), ozoneContainer, context, null); - - verify(containerHandler) - .markContainerForClose(container); - verify(writeChannel, never()) - .submitRequest(any(), any()); - // Container in CLOSING state is moved to UNHEALTHY if pipeline does not - // exist. Container should not exist in CLOSING state without a pipeline. 
- verify(containerHandler) - .markContainerUnhealthy(container); - } - - @Test - public void forceCloseQuasiClosedContainer() throws Exception { - // force-close a container that's already quasi closed - container.getContainerData() - .setState(ContainerProtos.ContainerDataProto.State.QUASI_CLOSED); - - subject.handle(forceCloseWithoutPipeline(), ozoneContainer, context, null); - - verify(writeChannel, never()) - .submitRequest(any(), any()); - verify(containerHandler) - .closeContainer(container); - } - - @Test - public void forceCloseOpenContainer() throws Exception { - // force-close a container that's NOT associated with an open pipeline - subject.handle(forceCloseWithoutPipeline(), ozoneContainer, context, null); - - verify(writeChannel, never()) - .submitRequest(any(), any()); - // Container in CLOSING state is moved to UNHEALTHY if pipeline does not - // exist. Container should not exist in CLOSING state without a pipeline. - verify(containerHandler) - .markContainerUnhealthy(container); - } - - @Test - public void forceCloseOpenContainerWithPipeline() throws Exception { - // force-close a container that's associated with an existing pipeline - subject.handle(forceCloseWithPipeline(), ozoneContainer, context, null); - - verify(containerHandler) - .markContainerForClose(container); - verify(writeChannel) - .submitRequest(any(), any()); - verify(containerHandler, never()) - .quasiCloseContainer(container); - verify(containerHandler, never()) - .closeContainer(container); - } - - @Test - public void closeAlreadyClosedContainer() throws Exception { - container.getContainerData() - .setState(ContainerProtos.ContainerDataProto.State.CLOSED); - - // Since the container is already closed, these commands should do nothing, - // neither should they fail - subject.handle(closeWithUnknownPipeline(), ozoneContainer, context, null); - subject.handle(closeWithKnownPipeline(), ozoneContainer, context, null); - - verify(containerHandler, never()) - .markContainerForClose(container); - verify(containerHandler, never()) - .quasiCloseContainer(container); - verify(containerHandler, never()) - .closeContainer(container); - verify(writeChannel, never()) - .submitRequest(any(), any()); - } - - private CloseContainerCommand closeWithKnownPipeline() { - return new CloseContainerCommand(CONTAINER_ID, pipelineID); - } - - private CloseContainerCommand closeWithUnknownPipeline() { - return new CloseContainerCommand(CONTAINER_ID, nonExistentPipelineID); - } - - private CloseContainerCommand forceCloseWithPipeline() { - return new CloseContainerCommand(CONTAINER_ID, pipelineID, true); - } - - private CloseContainerCommand forceCloseWithoutPipeline() { - return new CloseContainerCommand(CONTAINER_ID, nonExistentPipelineID, true); - } - - /** - * Creates a random DatanodeDetails. 
- * @return DatanodeDetails - */ - private static DatanodeDetails randomDatanodeDetails() { - String ipAddress = "127.0.0.1"; - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(UUID.randomUUID().toString()) - .setHostName("localhost") - .setIpAddress(ipAddress) - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort); - return builder.build(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java deleted file mode 100644 index 05ac76d1439e4..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Tests for command handlers. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java deleted file mode 100644 index 606940b51065c..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java +++ /dev/null @@ -1,295 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.states.endpoint; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine.DatanodeStates; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; - -import org.junit.Assert; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; - -import java.util.UUID; - -/** - * This class tests the functionality of HeartbeatEndpointTask. - */ -public class TestHeartbeatEndpointTask { - - - @Test - public void testheartbeatWithoutReports() throws Exception { - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(scm); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertFalse(heartbeat.hasNodeReport()); - Assert.assertFalse(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); - Assert.assertFalse(heartbeat.hasContainerActions()); - } - - @Test - public void testheartbeatWithNodeReports() throws Exception { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask( - conf, context, scm); - context.addReport(NodeReportProto.getDefaultInstance()); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = 
argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertTrue(heartbeat.hasNodeReport()); - Assert.assertFalse(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); - Assert.assertFalse(heartbeat.hasContainerActions()); - } - - @Test - public void testheartbeatWithContainerReports() throws Exception { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask( - conf, context, scm); - context.addReport(ContainerReportsProto.getDefaultInstance()); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertFalse(heartbeat.hasNodeReport()); - Assert.assertTrue(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); - Assert.assertFalse(heartbeat.hasContainerActions()); - } - - @Test - public void testheartbeatWithCommandStatusReports() throws Exception { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask( - conf, context, scm); - context.addReport(CommandStatusReportsProto.getDefaultInstance()); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertFalse(heartbeat.hasNodeReport()); - Assert.assertFalse(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() != 0); - Assert.assertFalse(heartbeat.hasContainerActions()); - } - - @Test - public void testheartbeatWithContainerActions() throws Exception { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - 
HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask( - conf, context, scm); - context.addContainerAction(getContainerAction()); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertFalse(heartbeat.hasNodeReport()); - Assert.assertFalse(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); - Assert.assertTrue(heartbeat.hasContainerActions()); - } - - @Test - public void testheartbeatWithAllReports() throws Exception { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask( - conf, context, scm); - context.addReport(NodeReportProto.getDefaultInstance()); - context.addReport(ContainerReportsProto.getDefaultInstance()); - context.addReport(CommandStatusReportsProto.getDefaultInstance()); - context.addContainerAction(getContainerAction()); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertTrue(heartbeat.hasNodeReport()); - Assert.assertTrue(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() != 0); - Assert.assertTrue(heartbeat.hasContainerActions()); - } - - /** - * Creates HeartbeatEndpointTask for the given StorageContainerManager proxy. - * - * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB - * - * @return HeartbeatEndpointTask - */ - private HeartbeatEndpointTask getHeartbeatEndpointTask( - StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - return getHeartbeatEndpointTask(conf, context, proxy); - - } - - /** - * Creates HeartbeatEndpointTask with the given conf, context and - * StorageContainerManager client side proxy. 
- * - * @param conf Configuration - * @param context StateContext - * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB - * - * @return HeartbeatEndpointTask - */ - private HeartbeatEndpointTask getHeartbeatEndpointTask( - Configuration conf, - StateContext context, - StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) { - DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder() - .setUuid(UUID.randomUUID().toString()) - .setHostName("localhost") - .setIpAddress("127.0.0.1") - .build(); - EndpointStateMachine endpointStateMachine = Mockito - .mock(EndpointStateMachine.class); - Mockito.when(endpointStateMachine.getEndPoint()).thenReturn(proxy); - return HeartbeatEndpointTask.newBuilder() - .setConfig(conf) - .setDatanodeDetails(datanodeDetails) - .setContext(context) - .setEndpointStateMachine(endpointStateMachine) - .build(); - } - - private ContainerAction getContainerAction() { - ContainerAction.Builder builder = ContainerAction.newBuilder(); - builder.setContainerID(1L) - .setAction(ContainerAction.Action.CLOSE) - .setReason(ContainerAction.Reason.CONTAINER_FULL); - return builder.build(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java deleted file mode 100644 index d120a5cd4b7ac..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.states.endpoint; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java deleted file mode 100644 index fb2f29b6a1389..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java +++ /dev/null @@ -1,139 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import java.io.File; -import java.util.Properties; -import java.util.UUID; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Unit tests for {@link HddsVolume}. - */ -public class TestHddsVolume { - - private static final String DATANODE_UUID = UUID.randomUUID().toString(); - private static final String CLUSTER_ID = UUID.randomUUID().toString(); - private static final Configuration CONF = new Configuration(); - private static final String DU_CACHE_FILE = "scmUsed"; - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private File rootDir; - private HddsVolume volume; - private File versionFile; - - @Before - public void setup() throws Exception { - rootDir = new File(folder.getRoot(), HddsVolume.HDDS_VOLUME_DIR); - volume = new HddsVolume.Builder(folder.getRoot().getPath()) - .datanodeUuid(DATANODE_UUID) - .conf(CONF) - .build(); - versionFile = HddsVolumeUtil.getVersionFile(rootDir); - } - - @Test - public void testHddsVolumeInitialization() throws Exception { - - // The initial state of HddsVolume should be "NOT_FORMATTED" when - // clusterID is not specified and the version file should not be written - // to disk. - assertTrue(volume.getClusterID() == null); - assertEquals(StorageType.DEFAULT, volume.getStorageType()); - assertEquals(HddsVolume.VolumeState.NOT_FORMATTED, - volume.getStorageState()); - assertFalse("Version file should not be created when clusterID is not " + - "known.", versionFile.exists()); - - - // Format the volume with clusterID. - volume.format(CLUSTER_ID); - - // The state of HddsVolume after formatting with clusterID should be - // NORMAL and the version file should exist. 
- assertTrue("Volume format should create Version file", - versionFile.exists()); - assertEquals(volume.getClusterID(), CLUSTER_ID); - assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState()); - } - - @Test - public void testReadPropertiesFromVersionFile() throws Exception { - volume.format(CLUSTER_ID); - - Properties properties = DatanodeVersionFile.readFrom(versionFile); - - String storageID = HddsVolumeUtil.getStorageID(properties, versionFile); - String clusterID = HddsVolumeUtil.getClusterID( - properties, versionFile, CLUSTER_ID); - String datanodeUuid = HddsVolumeUtil.getDatanodeUUID( - properties, versionFile, DATANODE_UUID); - long cTime = HddsVolumeUtil.getCreationTime( - properties, versionFile); - int layoutVersion = HddsVolumeUtil.getLayOutVersion( - properties, versionFile); - - assertEquals(volume.getStorageID(), storageID); - assertEquals(volume.getClusterID(), clusterID); - assertEquals(volume.getDatanodeUuid(), datanodeUuid); - assertEquals(volume.getCTime(), cTime); - assertEquals(volume.getLayoutVersion(), layoutVersion); - } - - @Test - public void testShutdown() throws Exception { - // Return dummy value > 0 for scmUsage so that scm cache file is written - // during shutdown. - GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class); - volume.setScmUsageForTesting(scmUsageMock); - Mockito.when(scmUsageMock.getUsed()).thenReturn(Long.valueOf(100)); - - assertTrue("Available volume should be positive", - volume.getAvailable() > 0); - - // Shutdown the volume. - volume.shutdown(); - - // Volume state should be "NON_EXISTENT" when volume is shutdown. - assertEquals(HddsVolume.VolumeState.NON_EXISTENT, volume.getStorageState()); - - // Volume should save scmUsed cache file once volume is shutdown - File scmUsedFile = new File(folder.getRoot(), DU_CACHE_FILE); - System.out.println("scmUsedFile: " + scmUsedFile); - assertTrue("scmUsed cache file should be saved on shutdown", - scmUsedFile.exists()); - - // Volume.getAvailable() should succeed even when usage thread - // is shutdown. - volume.getAvailable(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java deleted file mode 100644 index 2e267be01e8d0..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; -import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.DiskChecker.DiskErrorException; -import org.apache.hadoop.util.FakeTimer; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Set; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.*; -import static org.mockito.Matchers.anyObject; -import static org.mockito.Mockito.*; - - -/** - * Tests for {@link HddsVolumeChecker}. - */ -@RunWith(Parameterized.class) -public class TestHddsVolumeChecker { - public static final Logger LOG = LoggerFactory.getLogger( - TestHddsVolumeChecker.class); - - @Rule - public TestName testName = new TestName(); - - @Rule - public Timeout globalTimeout = new Timeout(30_000); - - /** - * Run each test case for each possible value of {@link VolumeCheckResult}. - * Including "null" for 'throw exception'. - * @return - */ - @Parameters(name="{0}") - public static Collection data() { - List values = new ArrayList<>(); - for (VolumeCheckResult result : VolumeCheckResult.values()) { - values.add(new Object[] {result}); - } - values.add(new Object[] {null}); - return values; - } - - /** - * When null, the check call should throw an exception. - */ - private final VolumeCheckResult expectedVolumeHealth; - private static final int NUM_VOLUMES = 2; - - - public TestHddsVolumeChecker(VolumeCheckResult expectedVolumeHealth) { - this.expectedVolumeHealth = expectedVolumeHealth; - } - - /** - * Test {@link HddsVolumeChecker#checkVolume} propagates the - * check to the delegate checker. - * - * @throws Exception - */ - @Test - public void testCheckOneVolume() throws Exception { - LOG.info("Executing {}", testName.getMethodName()); - final HddsVolume volume = makeVolumes(1, expectedVolumeHealth).get(0); - final HddsVolumeChecker checker = - new HddsVolumeChecker(new HdfsConfiguration(), new FakeTimer()); - checker.setDelegateChecker(new DummyChecker()); - final AtomicLong numCallbackInvocations = new AtomicLong(0); - - /** - * Request a check and ensure it triggered {@link HddsVolume#check}. 
- */ - boolean result = - checker.checkVolume(volume, (healthyVolumes, failedVolumes) -> { - numCallbackInvocations.incrementAndGet(); - if (expectedVolumeHealth != null && - expectedVolumeHealth != FAILED) { - assertThat(healthyVolumes.size(), is(1)); - assertThat(failedVolumes.size(), is(0)); - } else { - assertThat(healthyVolumes.size(), is(0)); - assertThat(failedVolumes.size(), is(1)); - } - }); - - GenericTestUtils.waitFor(() -> numCallbackInvocations.get() > 0, 5, 10000); - - // Ensure that the check was invoked at least once. - verify(volume, times(1)).check(anyObject()); - if (result) { - assertThat(numCallbackInvocations.get(), is(1L)); - } - } - - /** - * Test {@link HddsVolumeChecker#checkAllVolumes} propagates - * checks for all volumes to the delegate checker. - * - * @throws Exception - */ - @Test - public void testCheckAllVolumes() throws Exception { - LOG.info("Executing {}", testName.getMethodName()); - - final List volumes = makeVolumes( - NUM_VOLUMES, expectedVolumeHealth); - final HddsVolumeChecker checker = - new HddsVolumeChecker(new HdfsConfiguration(), new FakeTimer()); - checker.setDelegateChecker(new DummyChecker()); - - Set failedVolumes = checker.checkAllVolumes(volumes); - LOG.info("Got back {} failed volumes", failedVolumes.size()); - - if (expectedVolumeHealth == null || expectedVolumeHealth == FAILED) { - assertThat(failedVolumes.size(), is(NUM_VOLUMES)); - } else { - assertTrue(failedVolumes.isEmpty()); - } - - // Ensure each volume's check() method was called exactly once. - for (HddsVolume volume : volumes) { - verify(volume, times(1)).check(anyObject()); - } - } - - /** - * A checker to wraps the result of {@link HddsVolume#check} in - * an ImmediateFuture. - */ - static class DummyChecker - implements AsyncChecker { - - @Override - public Optional> schedule( - Checkable target, - Boolean context) { - try { - LOG.info("Returning success for volume check"); - return Optional.of( - Futures.immediateFuture(target.check(context))); - } catch (Exception e) { - LOG.info("check routine threw exception " + e); - return Optional.of(Futures.immediateFailedFuture(e)); - } - } - - @Override - public void shutdownAndWait(long timeout, TimeUnit timeUnit) - throws InterruptedException { - // Nothing to cancel. - } - } - - static List makeVolumes( - int numVolumes, VolumeCheckResult health) throws Exception { - final List volumes = new ArrayList<>(numVolumes); - for (int i = 0; i < numVolumes; ++i) { - final HddsVolume volume = mock(HddsVolume.class); - - if (health != null) { - when(volume.check(any(Boolean.class))).thenReturn(health); - when(volume.check(isNull())).thenReturn(health); - } else { - final DiskErrorException de = new DiskErrorException("Fake Exception"); - when(volume.check(any(Boolean.class))).thenThrow(de); - when(volume.check(isNull())).thenThrow(de); - } - volumes.add(volume); - } - return volumes; - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java deleted file mode 100644 index d0fbf10269c4f..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.apache.hadoop.util.ReflectionUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.List; -import java.util.UUID; - -/** - * Tests {@link RoundRobinVolumeChoosingPolicy}. - */ -public class TestRoundRobinVolumeChoosingPolicy { - - private RoundRobinVolumeChoosingPolicy policy; - private List volumes; - private VolumeSet volumeSet; - - private final String baseDir = MiniDFSCluster.getBaseDirectory(); - private final String volume1 = baseDir + "disk1"; - private final String volume2 = baseDir + "disk2"; - - private static final String DUMMY_IP_ADDR = "0.0.0.0"; - - @Before - public void setup() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - String dataDirKey = volume1 + "," + volume2; - conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); - policy = ReflectionUtils.newInstance( - RoundRobinVolumeChoosingPolicy.class, null); - volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); - volumes = volumeSet.getVolumesList(); - } - - @After - public void cleanUp() { - if (volumeSet != null) { - volumeSet.shutdown(); - volumeSet = null; - } - } - - @Test - public void testRRVolumeChoosingPolicy() throws Exception { - HddsVolume hddsVolume1 = volumes.get(0); - HddsVolume hddsVolume2 = volumes.get(1); - - // Set available space in volume1 to 100L - setAvailableSpace(hddsVolume1, 100L); - - // Set available space in volume1 to 200L - setAvailableSpace(hddsVolume2, 200L); - - Assert.assertEquals(100L, hddsVolume1.getAvailable()); - Assert.assertEquals(200L, hddsVolume2.getAvailable()); - - // Test two rounds of round-robin choosing - Assert.assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0)); - Assert.assertEquals(hddsVolume2, policy.chooseVolume(volumes, 0)); - Assert.assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0)); - Assert.assertEquals(hddsVolume2, policy.chooseVolume(volumes, 0)); - - // The first volume has only 100L space, so the policy should - // choose the second one in case we ask for more. - Assert.assertEquals(hddsVolume2, - policy.chooseVolume(volumes, 150)); - - // Fail if no volume has enough space available - try { - policy.chooseVolume(volumes, Long.MAX_VALUE); - Assert.fail(); - } catch (IOException e) { - // Passed. 
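The round-robin behaviour exercised above (alternate between volumes, skip any volume that cannot hold the requested size, fail once a full pass finds nothing) can be captured in a small standalone chooser. The sketch below uses a hypothetical Volume type and is not the Ozone policy implementation itself.

    import java.io.IOException;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;

    public class RoundRobinChooser {

      /** Hypothetical volume abstraction: only available space matters here. */
      public static class Volume {
        final String name;
        final long available;
        Volume(String name, long available) { this.name = name; this.available = available; }
      }

      private final AtomicInteger next = new AtomicInteger();

      /** Start after the last choice, skip volumes that are too small, give up after one full pass. */
      public Volume choose(List<Volume> volumes, long required) throws IOException {
        int start = next.get();
        for (int i = 0; i < volumes.size(); i++) {
          Volume candidate = volumes.get((start + i) % volumes.size());
          if (candidate.available >= required) {
            next.set((start + i + 1) % volumes.size());
            return candidate;
          }
        }
        throw new IOException("Out of space: no volume can hold " + required + " B");
      }
    }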
- } - } - - @Test - public void testRRPolicyExceptionMessage() throws Exception { - HddsVolume hddsVolume1 = volumes.get(0); - HddsVolume hddsVolume2 = volumes.get(1); - - // Set available space in volume1 to 100L - setAvailableSpace(hddsVolume1, 100L); - - // Set available space in volume1 to 200L - setAvailableSpace(hddsVolume2, 200L); - - int blockSize = 300; - try { - policy.chooseVolume(volumes, blockSize); - Assert.fail("expected to throw DiskOutOfSpaceException"); - } catch(DiskOutOfSpaceException e) { - Assert.assertEquals("Not returnig the expected message", - "Out of space: The volume with the most available space (=" + 200 - + " B) is less than the container size (=" + blockSize + " B).", - e.getMessage()); - } - } - - private void setAvailableSpace(HddsVolume hddsVolume, long availableSpace) - throws IOException { - GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class); - hddsVolume.setScmUsageForTesting(scmUsageMock); - // Set used space to capacity -requiredAvailableSpace so that - // getAvailable() returns us the specified availableSpace. - Mockito.when(scmUsageMock.getUsed()).thenReturn( - (hddsVolume.getCapacity() - availableSpace)); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java deleted file mode 100644 index fa280ddb73084..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ /dev/null @@ -1,246 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
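setAvailableSpace() above controls the reported free space indirectly: availability is derived as capacity minus used, so the test pins the used value with Mockito. A self-contained sketch of that trick follows; the SpaceUsage and Volume types are hypothetical stand-ins.

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    public class AvailableSpaceMockSketch {

      /** Hypothetical usage source, analogous to GetSpaceUsed. */
      interface SpaceUsage {
        long getUsed();
      }

      /** Hypothetical volume that derives availability as capacity minus used. */
      static class Volume {
        private final long capacity;
        private SpaceUsage usage;
        Volume(long capacity) { this.capacity = capacity; }
        void setUsage(SpaceUsage usage) { this.usage = usage; }
        long getAvailable() { return capacity - usage.getUsed(); }
      }

      public static void main(String[] args) {
        Volume volume = new Volume(1000L);
        SpaceUsage usage = mock(SpaceUsage.class);
        // To make getAvailable() report 100, stub used space as capacity - 100.
        when(usage.getUsed()).thenReturn(1000L - 100L);
        volume.setUsage(usage);
        System.out.println(volume.getAvailable());   // 100
      }
    }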
- */ - -package org.apache.hadoop.ozone.container.common.volume; - -import java.io.IOException; -import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.container.common.volume.HddsVolume - .HDDS_VOLUME_DIR; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -/** - * Tests {@link VolumeSet} operations. - */ -public class TestVolumeSet { - - private OzoneConfiguration conf; - private VolumeSet volumeSet; - private final String baseDir = MiniDFSCluster.getBaseDirectory(); - private final String volume1 = baseDir + "disk1"; - private final String volume2 = baseDir + "disk2"; - private final List volumes = new ArrayList<>(); - - private static final String DUMMY_IP_ADDR = "0.0.0.0"; - - private void initializeVolumeSet() throws Exception { - volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); - } - - @Rule - public Timeout testTimeout = new Timeout(300000); - - @Before - public void setup() throws Exception { - conf = new OzoneConfiguration(); - String dataDirKey = volume1 + "," + volume2; - volumes.add(volume1); - volumes.add(volume2); - conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); - initializeVolumeSet(); - } - - @After - public void shutdown() throws IOException { - // Delete the hdds volume root dir - List hddsVolumes = new ArrayList<>(); - hddsVolumes.addAll(volumeSet.getVolumesList()); - hddsVolumes.addAll(volumeSet.getFailedVolumesList()); - - for (HddsVolume volume : hddsVolumes) { - FileUtils.deleteDirectory(volume.getHddsRootDir()); - } - volumeSet.shutdown(); - - FileUtil.fullyDelete(new File(baseDir)); - } - - private boolean checkVolumeExistsInVolumeSet(String volume) { - for (HddsVolume hddsVolume : volumeSet.getVolumesList()) { - if (hddsVolume.getHddsRootDir().getPath().equals( - HddsVolumeUtil.getHddsRoot(volume))) { - return true; - } - } - return false; - } - - @Test - public void testVolumeSetInitialization() throws Exception { - - List volumesList = volumeSet.getVolumesList(); - - // VolumeSet initialization should add volume1 and volume2 to VolumeSet - assertEquals("VolumeSet intialization is incorrect", - volumesList.size(), volumes.size()); - assertTrue("VolumeSet not initailized correctly", - checkVolumeExistsInVolumeSet(volume1)); - assertTrue("VolumeSet not initailized correctly", - checkVolumeExistsInVolumeSet(volume2)); - } - - @Test - public void testAddVolume() { - - assertEquals(2, volumeSet.getVolumesList().size()); - - // Add a volume to VolumeSet - String volume3 = baseDir + "disk3"; - boolean success = volumeSet.addVolume(volume3); - - assertTrue(success); - assertEquals(3, volumeSet.getVolumesList().size()); - assertTrue("AddVolume did not add requested volume to VolumeSet", - 
checkVolumeExistsInVolumeSet(volume3)); - } - - @Test - public void testFailVolume() throws Exception { - - //Fail a volume - volumeSet.failVolume(volume1); - - // Failed volume should not show up in the volumeList - assertEquals(1, volumeSet.getVolumesList().size()); - - // Failed volume should be added to FailedVolumeList - assertEquals("Failed volume not present in FailedVolumeMap", - 1, volumeSet.getFailedVolumesList().size()); - assertEquals("Failed Volume list did not match", - HddsVolumeUtil.getHddsRoot(volume1), - volumeSet.getFailedVolumesList().get(0).getHddsRootDir().getPath()); - assertTrue(volumeSet.getFailedVolumesList().get(0).isFailed()); - - // Failed volume should not exist in VolumeMap - assertFalse(volumeSet.getVolumeMap().containsKey(volume1)); - } - - @Test - public void testRemoveVolume() throws Exception { - - assertEquals(2, volumeSet.getVolumesList().size()); - - // Remove a volume from VolumeSet - volumeSet.removeVolume(volume1); - assertEquals(1, volumeSet.getVolumesList().size()); - - // Attempting to remove a volume which does not exist in VolumeSet should - // log a warning. - LogCapturer logs = LogCapturer.captureLogs( - LogFactory.getLog(VolumeSet.class)); - volumeSet.removeVolume(volume1); - assertEquals(1, volumeSet.getVolumesList().size()); - String expectedLogMessage = "Volume : " + - HddsVolumeUtil.getHddsRoot(volume1) + " does not exist in VolumeSet"; - assertTrue("Log output does not contain expected log message: " - + expectedLogMessage, logs.getOutput().contains(expectedLogMessage)); - } - - @Test - public void testVolumeInInconsistentState() throws Exception { - assertEquals(2, volumeSet.getVolumesList().size()); - - // Add a volume to VolumeSet - String volume3 = baseDir + "disk3"; - - // Create the root volume dir and create a sub-directory within it. - File newVolume = new File(volume3, HDDS_VOLUME_DIR); - System.out.println("new volume root: " + newVolume); - newVolume.mkdirs(); - assertTrue("Failed to create new volume root", newVolume.exists()); - File dataDir = new File(newVolume, "chunks"); - dataDir.mkdirs(); - assertTrue(dataDir.exists()); - - // The new volume is in an inconsistent state as the root dir is - // non-empty but the version file does not exist. Add Volume should - // return false. - boolean success = volumeSet.addVolume(volume3); - - assertFalse(success); - assertEquals(2, volumeSet.getVolumesList().size()); - assertTrue("AddVolume should fail for an inconsistent volume", - !checkVolumeExistsInVolumeSet(volume3)); - - // Delete volume3 - File volume = new File(volume3); - FileUtils.deleteDirectory(volume); - } - - @Test - public void testShutdown() throws Exception { - List volumesList = volumeSet.getVolumesList(); - - volumeSet.shutdown(); - - // Verify that volume usage can be queried during shutdown. 
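testVolumeInInconsistentState above depends on the rule that a volume root which already contains data but has no version file is rejected as inconsistent. The sketch below restates that rule with plain java.io; the VERSION file name and the isUsable() helper are assumptions for illustration, and the real check lives in the volume code rather than the test.

    import java.io.File;

    public final class VolumeConsistencyCheck {

      /**
       * Hypothetical version of the rule exercised above:
       * - empty or missing root: usable, the volume can be formatted;
       * - root contains a version file: usable, the volume was formatted before;
       * - root is non-empty but has no version file: inconsistent, reject it.
       */
      public static boolean isUsable(File volumeRoot) {
        File[] children = volumeRoot.listFiles();
        if (children == null || children.length == 0) {
          return true;                         // nothing there yet
        }
        return new File(volumeRoot, "VERSION").exists();
      }

      public static void main(String[] args) {
        System.out.println(isUsable(new File("/tmp/does-not-exist")));  // true
      }
    }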
- for (HddsVolume volume : volumesList) { - Assert.assertNotNull(volume.getVolumeInfo().getUsageForTesting()); - volume.getAvailable(); - } - } - - @Test - public void testFailVolumes() throws Exception{ - VolumeSet volSet = null; - File readOnlyVolumePath = new File(baseDir); - //Set to readonly, so that this volume will be failed - readOnlyVolumePath.setReadOnly(); - File volumePath = GenericTestUtils.getRandomizedTestDir(); - OzoneConfiguration ozoneConfig = new OzoneConfiguration(); - ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath() - + "," + volumePath.getAbsolutePath()); - volSet = new VolumeSet(UUID.randomUUID().toString(), ozoneConfig); - assertEquals(1, volSet.getFailedVolumesList().size()); - assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0) - .getHddsRootDir()); - - //Set back to writable - try { - readOnlyVolumePath.setWritable(true); - volSet.shutdown(); - } finally { - FileUtil.fullyDelete(volumePath); - } - - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java deleted file mode 100644 index c5deff0fc7802..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ /dev/null @@ -1,190 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Set; -import java.util.UUID; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.DiskChecker.DiskErrorException; -import org.apache.hadoop.util.Timer; - -import com.google.common.collect.Iterables; -import org.apache.commons.io.FileUtils; -import org.apache.curator.shaded.com.google.common.collect.ImmutableSet; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.hamcrest.CoreMatchers.is; -import org.junit.After; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -/** - * Verify that {@link VolumeSet} correctly checks for failed disks - * during initialization. - */ -public class TestVolumeSetDiskChecks { - public static final Logger LOG = LoggerFactory.getLogger( - TestVolumeSetDiskChecks.class); - - @Rule - public Timeout globalTimeout = new Timeout(30_000); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private Configuration conf = null; - - /** - * Cleanup volume directories. - */ - @After - public void cleanup() { - final Collection dirs = conf.getTrimmedStringCollection( - DFS_DATANODE_DATA_DIR_KEY); - - for (String d: dirs) { - FileUtils.deleteQuietly(new File(d)); - } - } - - /** - * Verify that VolumeSet creates volume root directories at startup. - * @throws IOException - */ - @Test - public void testOzoneDirsAreCreated() throws IOException { - final int numVolumes = 2; - - conf = getConfWithDataNodeDirs(numVolumes); - final VolumeSet volumeSet = - new VolumeSet(UUID.randomUUID().toString(), conf); - - assertThat(volumeSet.getVolumesList().size(), is(numVolumes)); - assertThat(volumeSet.getFailedVolumesList().size(), is(0)); - - // Verify that the Ozone dirs were created during initialization. - Collection dirs = conf.getTrimmedStringCollection( - DFS_DATANODE_DATA_DIR_KEY); - for (String d : dirs) { - assertTrue(new File(d).isDirectory()); - } - volumeSet.shutdown(); - } - - /** - * Verify that bad volumes are filtered at startup. 
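The test introduced above stubs the volume checker so that a fixed number of volumes is reported as failed; the dummy checker that follows does this by taking the first numBadVolumes elements of the collection. A minimal Guava sketch of that idiom, with hypothetical names:

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.List;
    import java.util.Set;

    import com.google.common.collect.ImmutableSet;
    import com.google.common.collect.Iterables;

    public class FailFirstNSketch {

      /** Report the first numBad elements of the collection as "failed". */
      public static <V> Set<V> checkAll(Collection<V> volumes, int numBad) {
        return ImmutableSet.copyOf(Iterables.limit(volumes, numBad));
      }

      public static void main(String[] args) {
        List<String> volumes = Arrays.asList("disk1", "disk2", "disk3", "disk4", "disk5");
        System.out.println(checkAll(volumes, 2));   // [disk1, disk2]
      }
    }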
- * @throws IOException - */ - @Test - public void testBadDirectoryDetection() throws IOException { - final int numVolumes = 5; - final int numBadVolumes = 2; - - conf = getConfWithDataNodeDirs(numVolumes); - final VolumeSet volumeSet = new VolumeSet( - UUID.randomUUID().toString(), conf) { - @Override - HddsVolumeChecker getVolumeChecker(Configuration configuration) - throws DiskErrorException { - return new DummyChecker(configuration, new Timer(), numBadVolumes); - } - }; - - assertThat(volumeSet.getFailedVolumesList().size(), is(numBadVolumes)); - assertThat(volumeSet.getVolumesList().size(), - is(numVolumes - numBadVolumes)); - volumeSet.shutdown(); - } - - /** - * Verify that all volumes are added to fail list if all volumes are bad. - */ - @Test - public void testAllVolumesAreBad() throws IOException { - final int numVolumes = 5; - - conf = getConfWithDataNodeDirs(numVolumes); - - final VolumeSet volumeSet = new VolumeSet( - UUID.randomUUID().toString(), conf) { - @Override - HddsVolumeChecker getVolumeChecker(Configuration configuration) - throws DiskErrorException { - return new DummyChecker(configuration, new Timer(), numVolumes); - } - }; - - assertEquals(volumeSet.getFailedVolumesList().size(), numVolumes); - assertEquals(volumeSet.getVolumesList().size(), 0); - volumeSet.shutdown(); - } - - /** - * Update configuration with the specified number of Datanode - * storage directories. - * @param conf - * @param numDirs - */ - private Configuration getConfWithDataNodeDirs(int numDirs) { - final Configuration ozoneConf = new OzoneConfiguration(); - final List dirs = new ArrayList<>(); - for (int i = 0; i < numDirs; ++i) { - dirs.add(GenericTestUtils.getRandomizedTestDir().getPath()); - } - ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs)); - return ozoneConf; - } - - /** - * A no-op checker that fails the given number of volumes and succeeds - * the rest. - */ - static class DummyChecker extends HddsVolumeChecker { - private final int numBadVolumes; - - DummyChecker(Configuration conf, Timer timer, int numBadVolumes) - throws DiskErrorException { - super(conf, timer); - this.numBadVolumes = numBadVolumes; - } - - @Override - public Set checkAllVolumes(Collection volumes) - throws InterruptedException { - // Return the first 'numBadVolumes' as failed. - return ImmutableSet.copyOf(Iterables.limit(volumes, numBadVolumes)); - } - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java deleted file mode 100644 index 3328deb06d9e1..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for Container Volumes. - */ -package org.apache.hadoop.ozone.container.common.volume; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java deleted file mode 100644 index 1d580a097471a..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; - -/** - * This class is used to test key related operations on the container. 
- */ -public class TestBlockManagerImpl { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private OzoneConfiguration config; - private String scmId = UUID.randomUUID().toString(); - private VolumeSet volumeSet; - private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private KeyValueContainerData keyValueContainerData; - private KeyValueContainer keyValueContainer; - private BlockData blockData; - private BlockManagerImpl blockManager; - private BlockID blockID; - - @Before - public void setUp() throws Exception { - config = new OzoneConfiguration(); - UUID datanodeId = UUID.randomUUID(); - HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot() - .getAbsolutePath()).conf(config).datanodeUuid(datanodeId - .toString()).build(); - - volumeSet = mock(VolumeSet.class); - - volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class); - Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) - .thenReturn(hddsVolume); - - keyValueContainerData = new KeyValueContainerData(1L, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - datanodeId.toString()); - - keyValueContainer = new KeyValueContainer( - keyValueContainerData, config); - - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - - // Creating BlockData - blockID = new BlockID(1L, 1L); - blockData = new BlockData(blockID); - blockData.addMetadata("VOLUME", "ozone"); - blockData.addMetadata("OWNER", "hdfs"); - List chunkList = new ArrayList<>(); - ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, 1024); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - - // Create KeyValueContainerManager - blockManager = new BlockManagerImpl(config); - - } - - @Test - public void testPutAndGetBlock() throws Exception { - assertEquals(0, keyValueContainer.getContainerData().getKeyCount()); - //Put Block - blockManager.putBlock(keyValueContainer, blockData); - - assertEquals(1, keyValueContainer.getContainerData().getKeyCount()); - //Get Block - BlockData fromGetBlockData = blockManager.getBlock(keyValueContainer, - blockData.getBlockID()); - - assertEquals(blockData.getContainerID(), fromGetBlockData.getContainerID()); - assertEquals(blockData.getLocalID(), fromGetBlockData.getLocalID()); - assertEquals(blockData.getChunks().size(), - fromGetBlockData.getChunks().size()); - assertEquals(blockData.getMetadata().size(), fromGetBlockData.getMetadata() - .size()); - - } - - @Test - public void testDeleteBlock() throws Exception { - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - //Put Block - blockManager.putBlock(keyValueContainer, blockData); - assertEquals(1, - keyValueContainer.getContainerData().getKeyCount()); - //Delete Block - blockManager.deleteBlock(keyValueContainer, blockID); - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - try { - blockManager.getBlock(keyValueContainer, blockID); - fail("testDeleteBlock"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains( - "Unable to find the block", ex); - } - } - - @Test - public void testListBlock() throws Exception { - blockManager.putBlock(keyValueContainer, blockData); - List listBlockData = blockManager.listBlock( - keyValueContainer, 1, 10); - assertNotNull(listBlockData); - assertTrue(listBlockData.size() == 1); - - for (long i = 2; i <= 10; i++) { - blockID = new BlockID(1L, i); - blockData = new BlockData(blockID); - blockData.addMetadata("VOLUME", "ozone"); - 
blockData.addMetadata("OWNER", "hdfs"); - List chunkList = new ArrayList<>(); - ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, 1024); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - blockManager.putBlock(keyValueContainer, blockData); - } - - listBlockData = blockManager.listBlock( - keyValueContainer, 1, 10); - assertNotNull(listBlockData); - assertTrue(listBlockData.size() == 10); - } - - @Test - public void testGetNoSuchBlock() throws Exception { - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - //Put Block - blockManager.putBlock(keyValueContainer, blockData); - assertEquals(1, - keyValueContainer.getContainerData().getKeyCount()); - //Delete Block - blockManager.deleteBlock(keyValueContainer, blockID); - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - try { - //Since the block has been deleted, we should not be able to find it - blockManager.getBlock(keyValueContainer, blockID); - fail("testGetNoSuchBlock failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains( - "Unable to find the block", ex); - assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult()); - } - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java deleted file mode 100644 index 84ab56da86459..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import java.io.File; -import java.nio.ByteBuffer; -import java.util.UUID; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.junit.Assert.*; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; - -/** - * This class is used to test ChunkManager operations. - */ -public class TestChunkManagerImpl { - - private OzoneConfiguration config; - private String scmId = UUID.randomUUID().toString(); - private VolumeSet volumeSet; - private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private HddsVolume hddsVolume; - private KeyValueContainerData keyValueContainerData; - private KeyValueContainer keyValueContainer; - private BlockID blockID; - private ChunkManagerImpl chunkManager; - private ChunkInfo chunkInfo; - private ByteBuffer data; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Before - public void setUp() throws Exception { - config = new OzoneConfiguration(); - UUID datanodeId = UUID.randomUUID(); - hddsVolume = new HddsVolume.Builder(folder.getRoot() - .getAbsolutePath()).conf(config).datanodeUuid(datanodeId - .toString()).build(); - - volumeSet = mock(VolumeSet.class); - - volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class); - Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) - .thenReturn(hddsVolume); - - keyValueContainerData = new KeyValueContainerData(1L, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - datanodeId.toString()); - - keyValueContainer = new KeyValueContainer(keyValueContainerData, config); - - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - - data = ByteBuffer.wrap("testing write chunks".getBytes(UTF_8)); - // Creating BlockData - blockID = new BlockID(1L, 1L); - chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, data.capacity()); - - // Create a ChunkManager object. - chunkManager = new ChunkManagerImpl(true); - - } - - private DispatcherContext getDispatcherContext() { - return new DispatcherContext.Builder().build(); - } - - @Test - public void testWriteChunkStageWriteAndCommit() throws Exception { - //As in Setup, we try to create container, these paths should exist. 
- assertTrue(keyValueContainerData.getChunksPath() != null); - File chunksPath = new File(keyValueContainerData.getChunksPath()); - assertTrue(chunksPath.exists()); - // Initially chunks folder should be empty. - assertTrue(chunksPath.listFiles().length == 0); - - // As no chunks are written to the volume writeBytes should be 0 - checkWriteIOStats(0, 0); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build()); - // Now a chunk file is being written with Stage WRITE_DATA, so it should - // create a temporary chunk file. - assertTrue(chunksPath.listFiles().length == 1); - - long term = 0; - long index = 0; - File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo); - File tempChunkFile = new File(chunkFile.getParent(), - chunkFile.getName() + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER - + OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX - + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + term - + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + index); - - // As chunk write stage is WRITE_DATA, temp chunk file will be created. - assertTrue(tempChunkFile.exists()); - - checkWriteIOStats(data.capacity(), 1); - - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build()); - - checkWriteIOStats(data.capacity(), 1); - - // Old temp file should have been renamed to chunk file. - assertTrue(chunksPath.listFiles().length == 1); - - // As commit happened, chunk file should exist. - assertTrue(chunkFile.exists()); - assertFalse(tempChunkFile.exists()); - - } - - @Test - public void testWriteChunkIncorrectLength() throws Exception { - try { - long randomLength = 200L; - chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, randomLength); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - fail("testWriteChunkIncorrectLength failed"); - } catch (StorageContainerException ex) { - // As we got an exception, writeBytes should be 0. - checkWriteIOStats(0, 0); - GenericTestUtils.assertExceptionContains("data array does not match " + - "the length ", ex); - assertEquals(ContainerProtos.Result.INVALID_WRITE_SIZE, ex.getResult()); - } - } - - @Test - public void testWriteChunkStageCombinedData() throws Exception { - //As in Setup, we try to create container, these paths should exist. - assertTrue(keyValueContainerData.getChunksPath() != null); - File chunksPath = new File(keyValueContainerData.getChunksPath()); - assertTrue(chunksPath.exists()); - // Initially chunks folder should be empty. - assertTrue(chunksPath.listFiles().length == 0); - checkWriteIOStats(0, 0); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - // Now a chunk file is being written with Stage COMBINED_DATA, so it should - // create a chunk file. 
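The two-stage write exercised above, where WRITE_DATA lands in a temporary file and COMMIT_DATA promotes it, is the classic write-then-rename pattern. A JDK-only sketch is shown below; the file naming is hypothetical and does not follow the Ozone chunk layout.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class WriteThenCommitSketch {

      /** Stage 1: write the data somewhere readers will not look. */
      static Path writeData(Path finalFile, byte[] data) throws IOException {
        Path tmp = finalFile.resolveSibling(finalFile.getFileName() + ".tmp");
        Files.write(tmp, data);
        return tmp;
      }

      /** Stage 2: commit by renaming; atomic move where the filesystem supports it. */
      static void commit(Path tmp, Path finalFile) throws IOException {
        Files.move(tmp, finalFile, StandardCopyOption.ATOMIC_MOVE);
      }

      public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("chunks");
        Path chunk = dir.resolve("1.data.0");
        Path tmp = writeData(chunk, "testing write chunks".getBytes(StandardCharsets.UTF_8));
        commit(tmp, chunk);                       // after commit only the final file exists
        System.out.println(Files.exists(chunk));  // true
      }
    }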
- assertTrue(chunksPath.listFiles().length == 1); - File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo); - assertTrue(chunkFile.exists()); - checkWriteIOStats(data.capacity(), 1); - } - - @Test - public void testReadChunk() throws Exception { - checkWriteIOStats(0, 0); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - checkWriteIOStats(data.capacity(), 1); - checkReadIOStats(0, 0); - ByteBuffer expectedData = chunkManager.readChunk(keyValueContainer, blockID, - chunkInfo, getDispatcherContext()); - assertEquals(expectedData.limit()-expectedData.position(), - chunkInfo.getLen()); - assertTrue(expectedData.rewind().equals(data.rewind())); - checkReadIOStats(expectedData.capacity(), 1); - } - - @Test - public void testDeleteChunk() throws Exception { - File chunksPath = new File(keyValueContainerData.getChunksPath()); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - assertTrue(chunksPath.listFiles().length == 1); - chunkManager.deleteChunk(keyValueContainer, blockID, chunkInfo); - assertTrue(chunksPath.listFiles().length == 0); - } - - @Test - public void testDeleteChunkUnsupportedRequest() throws Exception { - try { - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - long randomLength = 200L; - chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, randomLength); - chunkManager.deleteChunk(keyValueContainer, blockID, chunkInfo); - fail("testDeleteChunkUnsupportedRequest"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("Not Supported Operation.", ex); - assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex.getResult()); - } - } - - @Test - public void testReadChunkFileNotExists() throws Exception { - try { - // trying to read a chunk, where chunk file does not exist - ByteBuffer expectedData = chunkManager.readChunk(keyValueContainer, - blockID, chunkInfo, getDispatcherContext()); - fail("testReadChunkFileNotExists failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("Unable to find the chunk " + - "file.", ex); - assertEquals(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, ex.getResult()); - } - } - - @Test - public void testWriteAndReadChunkMultipleTimes() throws Exception { - for (int i=0; i<100; i++) { - chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), i), 0, data.capacity()); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - data.rewind(); - } - checkWriteIOStats(data.capacity()*100, 100); - assertTrue(hddsVolume.getVolumeIOStats().getWriteTime() > 0); - - for (int i=0; i<100; i++) { - chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), i), 0, data.capacity()); - chunkManager.readChunk(keyValueContainer, blockID, chunkInfo, - getDispatcherContext()); - } - checkReadIOStats(data.capacity()*100, 100); - assertTrue(hddsVolume.getVolumeIOStats().getReadTime() > 0); - } - - - /** - * Check WriteIO stats. - * @param length - * @param opCount - */ - private void checkWriteIOStats(long length, long opCount) { - VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats(); - assertEquals(length, volumeIOStats.getWriteBytes()); - assertEquals(opCount, volumeIOStats.getWriteOpCount()); - } - - /** - * Check ReadIO stats. 
- * @param length - * @param opCount - */ - private void checkReadIOStats(long length, long opCount) { - VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats(); - assertEquals(length, volumeIOStats.getReadBytes()); - assertEquals(opCount, volumeIOStats.getReadOpCount()); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java deleted file mode 100644 index 4fdd994fb1193..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ /dev/null @@ -1,284 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import java.io.File; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.UUID; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_METADATA_STORE_IMPL_ROCKSDB; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * This class is used to test KeyValue container block iterator. 
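checkWriteIOStats and checkReadIOStats above assume the volume keeps simple cumulative counters per operation type. A thread-safe sketch of such counters follows; the class and method names are assumptions, not the real VolumeIOStats API.

    import java.util.concurrent.atomic.AtomicLong;

    /** Hypothetical per-volume IO counters in the spirit of the stats asserted above. */
    public class VolumeIoStatsSketch {
      private final AtomicLong writeBytes = new AtomicLong();
      private final AtomicLong writeOps = new AtomicLong();
      private final AtomicLong readBytes = new AtomicLong();
      private final AtomicLong readOps = new AtomicLong();

      public void incWriteStats(long bytes) {
        writeBytes.addAndGet(bytes);   // cumulative bytes written
        writeOps.incrementAndGet();    // one more write operation
      }

      public void incReadStats(long bytes) {
        readBytes.addAndGet(bytes);
        readOps.incrementAndGet();
      }

      public long getWriteBytes() { return writeBytes.get(); }
      public long getWriteOpCount() { return writeOps.get(); }
      public long getReadBytes() { return readBytes.get(); }
      public long getReadOpCount() { return readOps.get(); }
    }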
- */ -@RunWith(Parameterized.class) -public class TestKeyValueBlockIterator { - - private KeyValueContainer container; - private KeyValueContainerData containerData; - private VolumeSet volumeSet; - private Configuration conf; - private File testRoot; - - private final String storeImpl; - - public TestKeyValueBlockIterator(String metadataImpl) { - this.storeImpl = metadataImpl; - } - - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList(new Object[][] { - {OZONE_METADATA_STORE_IMPL_LEVELDB}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB}}); - } - - @Before - public void setUp() throws Exception { - testRoot = GenericTestUtils.getRandomizedTestDir(); - conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); - conf.set(OZONE_METADATA_STORE_IMPL, storeImpl); - volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); - } - - - @After - public void tearDown() { - volumeSet.shutdown(); - FileUtil.fullyDelete(testRoot); - } - - @Test - public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception { - - long containerID = 100L; - int deletedBlocks = 5; - int normalBlocks = 5; - createContainerWithBlocks(containerID, normalBlocks, deletedBlocks); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath))) { - - int counter = 0; - while (keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - assertEquals(blockData.getLocalID(), counter++); - } - - assertFalse(keyValueBlockIterator.hasNext()); - - keyValueBlockIterator.seekToFirst(); - counter = 0; - while (keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - assertEquals(blockData.getLocalID(), counter++); - } - assertFalse(keyValueBlockIterator.hasNext()); - - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + containerID, ex); - } - } - } - - @Test - public void testKeyValueBlockIteratorWithNextBlock() throws Exception { - long containerID = 101L; - createContainerWithBlocks(containerID, 2, 0); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath))) { - long blockID = 0L; - assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + containerID, ex); - } - } - } - - @Test - public void testKeyValueBlockIteratorWithHasNext() throws Exception { - long containerID = 102L; - createContainerWithBlocks(containerID, 2, 0); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath))) { - long blockID = 0L; - - // Even calling multiple times hasNext() should not move entry forward. 
- assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); - - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - keyValueBlockIterator.seekToLast(); - assertTrue(keyValueBlockIterator.hasNext()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - keyValueBlockIterator.seekToFirst(); - blockID = 0L; - assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + containerID, ex); - } - } - } - - @Test - public void testKeyValueBlockIteratorWithFilter() throws Exception { - long containerId = 103L; - int deletedBlocks = 5; - int normalBlocks = 5; - createContainerWithBlocks(containerId, normalBlocks, deletedBlocks); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerId, new File(containerPath), MetadataKeyFilters - .getDeletingKeyFilter())) { - - int counter = 5; - while (keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - assertEquals(blockData.getLocalID(), counter++); - } - } - } - - @Test - public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws - Exception { - long containerId = 104L; - createContainerWithBlocks(containerId, 0, 5); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerId, new File(containerPath))) { - //As all blocks are deleted blocks, blocks does not match with normal key - // filter. - assertFalse(keyValueBlockIterator.hasNext()); - } - } - - /** - * Creates a container with specified number of normal blocks and deleted - * blocks. First it will insert normal blocks, and then it will insert - * deleted blocks. 
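These iterator tests rely on blocks queued for deletion being stored under a distinguishing key prefix, so that a key filter can include or exclude them during iteration. The sketch below shows that filtering over a plain sorted map; the "#deleting#" literal mirrors the deleting-key prefix referenced above but is an assumption here.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.TreeMap;
    import java.util.function.Predicate;

    public class DeletingPrefixFilterSketch {

      // Prefix assumed for blocks queued for deletion.
      static final String DELETING_PREFIX = "#deleting#";

      /** Return the keys accepted by the filter, in key order. */
      static List<String> scan(TreeMap<String, byte[]> store, Predicate<String> filter) {
        List<String> accepted = new ArrayList<>();
        for (String key : store.keySet()) {
          if (filter.test(key)) {
            accepted.add(key);
          }
        }
        return accepted;
      }

      public static void main(String[] args) {
        TreeMap<String, byte[]> store = new TreeMap<>();
        store.put("1", new byte[0]);                       // normal block, key = localID
        store.put(DELETING_PREFIX + "2", new byte[0]);     // block marked for deletion

        Predicate<String> normalOnly = key -> !key.startsWith(DELETING_PREFIX);
        Predicate<String> deletingOnly = key -> key.startsWith(DELETING_PREFIX);

        System.out.println(scan(store, normalOnly));    // [1]
        System.out.println(scan(store, deletingOnly));  // [#deleting#2]
      }
    }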
- * @param containerId - * @param normalBlocks - * @param deletedBlocks - * @throws Exception - */ - private void createContainerWithBlocks(long containerId, int - normalBlocks, int deletedBlocks) throws - Exception { - containerData = new KeyValueContainerData(containerId, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - container = new KeyValueContainer(containerData, conf); - container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID - .randomUUID().toString()); - try(ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, - conf)) { - - List chunkList = new ArrayList<>(); - ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024); - chunkList.add(info.getProtoBufMessage()); - - for (int i = 0; i < normalBlocks; i++) { - BlockID blockID = new BlockID(containerId, i); - BlockData blockData = new BlockData(blockID); - blockData.setChunks(chunkList); - metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()), - blockData - .getProtoBufMessage().toByteArray()); - } - - for (int i = normalBlocks; i < deletedBlocks; i++) { - BlockID blockID = new BlockID(containerId, i); - BlockData blockData = new BlockData(blockID); - blockData.setChunks(chunkList); - metadataStore.getStore().put(DFSUtil.string2Bytes(OzoneConsts - .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData - .getProtoBufMessage().toByteArray()); - } - } - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java deleted file mode 100644 index 81d3065833ebe..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume - .RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.DiskChecker; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import org.mockito.Mockito; - -import java.io.File; - -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Map; -import java.util.List; -import java.util.UUID; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.ratis.util.Preconditions.assertTrue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.fail; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; - -/** - * Class to test KeyValue Container operations. 
- */ -public class TestKeyValueContainer { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - - private OzoneConfiguration conf; - private String scmId = UUID.randomUUID().toString(); - private VolumeSet volumeSet; - private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private KeyValueContainerData keyValueContainerData; - private KeyValueContainer keyValueContainer; - private UUID datanodeId; - - @Before - public void setUp() throws Exception { - conf = new OzoneConfiguration(); - datanodeId = UUID.randomUUID(); - HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot() - .getAbsolutePath()).conf(conf).datanodeUuid(datanodeId - .toString()).build(); - - volumeSet = mock(VolumeSet.class); - volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class); - Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) - .thenReturn(hddsVolume); - - keyValueContainerData = new KeyValueContainerData(1L, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - datanodeId.toString()); - - keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); - - } - - @Test - public void testBlockIterator() throws Exception{ - keyValueContainerData = new KeyValueContainerData(100L, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), - datanodeId.toString()); - keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - KeyValueBlockIterator blockIterator = keyValueContainer.blockIterator(); - //As no blocks created, hasNext should return false. - assertFalse(blockIterator.hasNext()); - int blockCount = 10; - addBlocks(blockCount); - blockIterator = keyValueContainer.blockIterator(); - assertTrue(blockIterator.hasNext()); - BlockData blockData; - int blockCounter = 0; - while(blockIterator.hasNext()) { - blockData = blockIterator.nextBlock(); - assertEquals(blockCounter++, blockData.getBlockID().getLocalID()); - } - assertEquals(blockCount, blockCounter); - } - - private void addBlocks(int count) throws Exception { - long containerId = keyValueContainerData.getContainerID(); - - try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer - .getContainerData(), conf)) { - for (int i = 0; i < count; i++) { - // Creating BlockData - BlockID blockID = new BlockID(containerId, i); - BlockData blockData = new BlockData(blockID); - blockData.addMetadata("VOLUME", "ozone"); - blockData.addMetadata("OWNER", "hdfs"); - List chunkList = new ArrayList<>(); - ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, 1024); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()), - blockData - .getProtoBufMessage().toByteArray()); - } - } - } - - @SuppressWarnings("RedundantCast") - @Test - public void testCreateContainer() throws Exception { - - // Create Container. - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - - keyValueContainerData = keyValueContainer - .getContainerData(); - - String containerMetaDataPath = keyValueContainerData - .getMetadataPath(); - String chunksPath = keyValueContainerData.getChunksPath(); - - // Check whether containerMetaDataPath and chunksPath exists or not. - assertTrue(containerMetaDataPath != null); - assertTrue(chunksPath != null); - //Check whether container file and container db file exists or not. 
- assertTrue(keyValueContainer.getContainerFile().exists(), - ".Container File does not exist"); - assertTrue(keyValueContainer.getContainerDBFile().exists(), "Container " + - "DB does not exist"); - } - - @Test - public void testContainerImportExport() throws Exception { - - long containerId = keyValueContainer.getContainerData().getContainerID(); - // Create Container. - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - - - keyValueContainerData = keyValueContainer - .getContainerData(); - - keyValueContainerData.setState( - ContainerProtos.ContainerDataProto.State.CLOSED); - - int numberOfKeysToWrite = 12; - //write one few keys to check the key count after import - try(ReferenceCountedDB metadataStore = - BlockUtils.getDB(keyValueContainerData, conf)) { - for (int i = 0; i < numberOfKeysToWrite; i++) { - metadataStore.getStore().put(("test" + i).getBytes(UTF_8), - "test".getBytes(UTF_8)); - } - } - BlockUtils.removeDB(keyValueContainerData, conf); - - Map metadata = new HashMap<>(); - metadata.put("key1", "value1"); - keyValueContainer.update(metadata, true); - - //destination path - File folderToExport = folder.newFile("exported.tar.gz"); - - TarContainerPacker packer = new TarContainerPacker(); - - //export the container - try (FileOutputStream fos = new FileOutputStream(folderToExport)) { - keyValueContainer - .exportContainerData(fos, packer); - } - - //delete the original one - keyValueContainer.delete(); - - //create a new one - KeyValueContainerData containerData = - new KeyValueContainerData(containerId, 1, - keyValueContainerData.getMaxSize(), UUID.randomUUID().toString(), - datanodeId.toString()); - KeyValueContainer container = new KeyValueContainer(containerData, conf); - - HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet - .getVolumesList(), 1); - String hddsVolumeDir = containerVolume.getHddsRootDir().toString(); - - container.populatePathFields(scmId, containerVolume, hddsVolumeDir); - try (FileInputStream fis = new FileInputStream(folderToExport)) { - container.importContainerData(fis, packer); - } - - Assert.assertEquals("value1", containerData.getMetadata().get("key1")); - Assert.assertEquals(keyValueContainerData.getContainerDBType(), - containerData.getContainerDBType()); - Assert.assertEquals(keyValueContainerData.getState(), - containerData.getState()); - Assert.assertEquals(numberOfKeysToWrite, - containerData.getKeyCount()); - Assert.assertEquals(keyValueContainerData.getLayOutVersion(), - containerData.getLayOutVersion()); - Assert.assertEquals(keyValueContainerData.getMaxSize(), - containerData.getMaxSize()); - Assert.assertEquals(keyValueContainerData.getBytesUsed(), - containerData.getBytesUsed()); - - //Can't overwrite existing container - try { - try (FileInputStream fis = new FileInputStream(folderToExport)) { - container.importContainerData(fis, packer); - } - fail("Container is imported twice. Previous files are overwritten"); - } catch (IOException ex) { - //all good - } - - } - - @Test - public void testDuplicateContainer() throws Exception { - try { - // Create Container. 
- keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - fail("testDuplicateContainer failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("ContainerFile already " + - "exists", ex); - assertEquals(ContainerProtos.Result.CONTAINER_ALREADY_EXISTS, ex - .getResult()); - } - } - - @Test - public void testDiskFullExceptionCreateContainer() throws Exception { - - Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) - .thenThrow(DiskChecker.DiskOutOfSpaceException.class); - try { - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - fail("testDiskFullExceptionCreateContainer failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("disk out of space", - ex); - assertEquals(ContainerProtos.Result.DISK_OUT_OF_SPACE, ex.getResult()); - } - } - - @Test - public void testDeleteContainer() throws Exception { - keyValueContainerData.setState(ContainerProtos.ContainerDataProto.State - .CLOSED); - keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.delete(); - - String containerMetaDataPath = keyValueContainerData - .getMetadataPath(); - File containerMetaDataLoc = new File(containerMetaDataPath); - - assertFalse("Container directory still exists", containerMetaDataLoc - .getParentFile().exists()); - - assertFalse("Container File still exists", - keyValueContainer.getContainerFile().exists()); - assertFalse("Container DB file still exists", - keyValueContainer.getContainerDBFile().exists()); - } - - - @Test - public void testCloseContainer() throws Exception { - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.close(); - - keyValueContainerData = keyValueContainer - .getContainerData(); - - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, - keyValueContainerData.getState()); - - //Check state in the .container file - String containerMetaDataPath = keyValueContainerData - .getMetadataPath(); - File containerFile = keyValueContainer.getContainerFile(); - - keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, - keyValueContainerData.getState()); - } - - @Test - public void testReportOfUnhealthyContainer() throws Exception { - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - Assert.assertNotNull(keyValueContainer.getContainerReport()); - keyValueContainer.markContainerUnhealthy(); - File containerFile = keyValueContainer.getContainerFile(); - keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY, - keyValueContainerData.getState()); - Assert.assertNotNull(keyValueContainer.getContainerReport()); - } - - @Test - public void testUpdateContainer() throws IOException { - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - Map metadata = new HashMap<>(); - metadata.put("VOLUME", "ozone"); - metadata.put("OWNER", "hdfs"); - keyValueContainer.update(metadata, true); - - keyValueContainerData = keyValueContainer - .getContainerData(); - - assertEquals(2, keyValueContainerData.getMetadata().size()); - - //Check metadata in the .container file - File containerFile = keyValueContainer.getContainerFile(); - - 
keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertEquals(2, keyValueContainerData.getMetadata().size()); - - } - - @Test - public void testUpdateContainerUnsupportedRequest() throws Exception { - try { - keyValueContainerData.setState( - ContainerProtos.ContainerDataProto.State.CLOSED); - keyValueContainer = new KeyValueContainer(keyValueContainerData, conf); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - Map metadata = new HashMap<>(); - metadata.put("VOLUME", "ozone"); - keyValueContainer.update(metadata, false); - fail("testUpdateContainerUnsupportedRequest failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("Updating a closed container " + - "without force option is not allowed", ex); - assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex - .getResult()); - } - } - - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java deleted file mode 100644 index fe702fc693a26..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ /dev/null @@ -1,270 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import com.google.common.primitives.Longs; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; -import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import java.io.File; -import java.io.RandomAccessFile; -import java.util.Arrays; -import java.util.ArrayList; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.List; -import java.util.UUID; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; - - -/** - * Basic sanity test for the KeyValueContainerCheck class. 
- */ -@RunWith(Parameterized.class) public class TestKeyValueContainerCheck { - private final String storeImpl; - private KeyValueContainer container; - private KeyValueContainerData containerData; - private VolumeSet volumeSet; - private OzoneConfiguration conf; - private File testRoot; - - public TestKeyValueContainerCheck(String metadataImpl) { - this.storeImpl = metadataImpl; - } - - @Parameterized.Parameters public static Collection data() { - return Arrays.asList(new Object[][] {{OZONE_METADATA_STORE_IMPL_LEVELDB}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB}}); - } - - @Before public void setUp() throws Exception { - this.testRoot = GenericTestUtils.getRandomizedTestDir(); - conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); - conf.set(OZONE_METADATA_STORE_IMPL, storeImpl); - volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); - } - - @After public void teardown() { - volumeSet.shutdown(); - FileUtil.fullyDelete(testRoot); - } - - /** - * Sanity test, when there are no corruptions induced. - */ - @Test - public void testKeyValueContainerCheckNoCorruption() throws Exception { - long containerID = 101; - int deletedBlocks = 1; - int normalBlocks = 3; - int chunksPerBlock = 4; - ContainerScrubberConfiguration c = conf.getObject( - ContainerScrubberConfiguration.class); - - // test Closed Container - createContainerWithBlocks(containerID, normalBlocks, deletedBlocks, - chunksPerBlock); - - KeyValueContainerCheck kvCheck = - new KeyValueContainerCheck(containerData.getMetadataPath(), conf, - containerID); - - // first run checks on a Open Container - boolean valid = kvCheck.fastCheck(); - assertTrue(valid); - - container.close(); - - // next run checks on a Closed Container - valid = kvCheck.fullCheck(new DataTransferThrottler( - c.getBandwidthPerVolume()), null); - assertTrue(valid); - } - - /** - * Sanity test, when there are corruptions induced. - */ - @Test - public void testKeyValueContainerCheckCorruption() throws Exception { - long containerID = 102; - int deletedBlocks = 1; - int normalBlocks = 3; - int chunksPerBlock = 4; - ContainerScrubberConfiguration sc = conf.getObject( - ContainerScrubberConfiguration.class); - - // test Closed Container - createContainerWithBlocks(containerID, normalBlocks, deletedBlocks, - chunksPerBlock); - - container.close(); - - KeyValueContainerCheck kvCheck = - new KeyValueContainerCheck(containerData.getMetadataPath(), conf, - containerID); - - File metaDir = new File(containerData.getMetadataPath()); - File dbFile = KeyValueContainerLocationUtil - .getContainerDBFile(metaDir, containerID); - containerData.setDbFile(dbFile); - try (ReferenceCountedDB ignored = - BlockUtils.getDB(containerData, conf); - KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID, - new File(containerData.getContainerPath()))) { - BlockData block = kvIter.nextBlock(); - assertFalse(block.getChunks().isEmpty()); - ContainerProtos.ChunkInfo c = block.getChunks().get(0); - File chunkFile = ChunkUtils.getChunkFile(containerData, - ChunkInfo.getFromProtoBuf(c)); - long length = chunkFile.length(); - assertTrue(length > 0); - // forcefully truncate the file to induce failure. - try (RandomAccessFile file = new RandomAccessFile(chunkFile, "rws")) { - file.setLength(length / 2); - } - assertEquals(length/2, chunkFile.length()); - } - - // metadata check should pass. - boolean valid = kvCheck.fastCheck(); - assertTrue(valid); - - // checksum validation should fail. 
- valid = kvCheck.fullCheck(new DataTransferThrottler( - sc.getBandwidthPerVolume()), null); - assertFalse(valid); - } - - /** - * Creates a container with normal and deleted blocks. - * First it will insert normal blocks, and then it will insert - * deleted blocks. - */ - private void createContainerWithBlocks(long containerId, int normalBlocks, - int deletedBlocks, int chunksPerBlock) throws Exception { - String strBlock = "block"; - String strChunk = "-chunkFile"; - long totalBlocks = normalBlocks + deletedBlocks; - int unitLen = 1024; - int chunkLen = 3 * unitLen; - int bytesPerChecksum = 2 * unitLen; - Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256, - bytesPerChecksum); - byte[] chunkData = RandomStringUtils.randomAscii(chunkLen).getBytes(); - ChecksumData checksumData = checksum.computeChecksum(chunkData); - - containerData = new KeyValueContainerData(containerId, - (long) StorageUnit.BYTES.toBytes( - chunksPerBlock * chunkLen * totalBlocks), - UUID.randomUUID().toString(), UUID.randomUUID().toString()); - container = new KeyValueContainer(containerData, conf); - container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), - UUID.randomUUID().toString()); - try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, - conf)) { - ChunkManagerImpl chunkManager = new ChunkManagerImpl(true); - - assertNotNull(containerData.getChunksPath()); - File chunksPath = new File(containerData.getChunksPath()); - assertTrue(chunksPath.exists()); - // Initially chunks folder should be empty. - File[] chunkFilesBefore = chunksPath.listFiles(); - assertNotNull(chunkFilesBefore); - assertEquals(0, chunkFilesBefore.length); - - List chunkList = new ArrayList<>(); - for (int i = 0; i < totalBlocks; i++) { - BlockID blockID = new BlockID(containerId, i); - BlockData blockData = new BlockData(blockID); - - chunkList.clear(); - for (long chunkCount = 0; chunkCount < chunksPerBlock; chunkCount++) { - String chunkName = strBlock + i + strChunk + chunkCount; - ChunkInfo info = new ChunkInfo(chunkName, 0, chunkLen); - info.setChecksumData(checksumData); - chunkList.add(info.getProtoBufMessage()); - chunkManager - .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA) - .build()); - chunkManager - .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA) - .build()); - } - blockData.setChunks(chunkList); - - if (i >= normalBlocks) { - // deleted key - metadataStore.getStore().put(DFSUtil.string2Bytes( - OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()), - blockData.getProtoBufMessage().toByteArray()); - } else { - // normal key - metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()), - blockData.getProtoBufMessage().toByteArray()); - } - } - - File[] chunkFilesAfter = chunksPath.listFiles(); - assertNotNull(chunkFilesAfter); - assertEquals((deletedBlocks + normalBlocks) * chunksPerBlock, - chunkFilesAfter.length); - } - } - -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java deleted file mode 100644 index c3e67c7ae6b4e..0000000000000 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.Timeout; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.UUID; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.OPEN; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; - -/** - * Tests unhealthy container functionality in the {@link KeyValueContainer} - * class. 
- */ -public class TestKeyValueContainerMarkUnhealthy { - public static final Logger LOG = LoggerFactory.getLogger( - TestKeyValueContainerMarkUnhealthy.class); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Rule - public Timeout timeout = new Timeout(600_000); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private OzoneConfiguration conf; - private String scmId = UUID.randomUUID().toString(); - private VolumeSet volumeSet; - private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private KeyValueContainerData keyValueContainerData; - private KeyValueContainer keyValueContainer; - private UUID datanodeId; - - @Before - public void setUp() throws Exception { - conf = new OzoneConfiguration(); - datanodeId = UUID.randomUUID(); - HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot() - .getAbsolutePath()).conf(conf).datanodeUuid(datanodeId - .toString()).build(); - - volumeSet = mock(VolumeSet.class); - volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class); - Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) - .thenReturn(hddsVolume); - - keyValueContainerData = new KeyValueContainerData(1L, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - datanodeId.toString()); - final File metaDir = GenericTestUtils.getRandomizedTestDir(); - metaDir.mkdirs(); - keyValueContainerData.setMetadataPath(metaDir.getPath()); - - - keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); - } - - @After - public void teardown() { - volumeSet = null; - keyValueContainer = null; - keyValueContainerData = null; - } - - /** - * Verify that the .container file is correctly updated when a - * container is marked as unhealthy. - * - * @throws IOException - */ - @Test - public void testMarkContainerUnhealthy() throws IOException { - assertThat(keyValueContainerData.getState(), is(OPEN)); - keyValueContainer.markContainerUnhealthy(); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - - // Check metadata in the .container file - File containerFile = keyValueContainer.getContainerFile(); - - keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - } - - /** - * Attempting to close an unhealthy container should fail. - * @throws IOException - */ - @Test - public void testCloseUnhealthyContainer() throws IOException { - keyValueContainer.markContainerUnhealthy(); - thrown.expect(StorageContainerException.class); - keyValueContainer.markContainerForClose(); - } - - /** - * Attempting to mark a closed container as unhealthy should succeed. - */ - @Test - public void testMarkClosedContainerAsUnhealthy() throws IOException { - // We need to create the container so the compact-on-close operation - // does not NPE. - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.close(); - keyValueContainer.markContainerUnhealthy(); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - } - - /** - * Attempting to mark a quasi-closed container as unhealthy should succeed. - */ - @Test - public void testMarkQuasiClosedContainerAsUnhealthy() throws IOException { - // We need to create the container so the sync-on-quasi-close operation - // does not NPE. 
- keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.quasiClose(); - keyValueContainer.markContainerUnhealthy(); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - } - - /** - * Attempting to mark a closing container as unhealthy should succeed. - */ - @Test - public void testMarkClosingContainerAsUnhealthy() throws IOException { - keyValueContainer.markContainerForClose(); - keyValueContainer.markContainerUnhealthy(); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java deleted file mode 100644 index 2c71fef11a646..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; - -import org.mockito.Mockito; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_DATANODE_VOLUME_CHOOSING_POLICY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.doCallRealMethod; -import static org.mockito.Mockito.times; - - -import java.io.File; -import java.io.IOException; -import java.util.HashSet; -import java.util.UUID; - -/** - * Unit tests for {@link KeyValueHandler}. - */ -public class TestKeyValueHandler { - - @Rule - public TestRule timeout = new Timeout(300000); - - private static HddsDispatcher dispatcher; - private static KeyValueHandler handler; - - private final static String DATANODE_UUID = UUID.randomUUID().toString(); - - private final String baseDir = MiniDFSCluster.getBaseDirectory(); - private final String volume = baseDir + "disk1"; - - private static final long DUMMY_CONTAINER_ID = 9999; - - @BeforeClass - public static void setup() throws StorageContainerException { - // Create mock HddsDispatcher and KeyValueHandler. - handler = Mockito.mock(KeyValueHandler.class); - dispatcher = Mockito.mock(HddsDispatcher.class); - Mockito.when(dispatcher.getHandler(any())).thenReturn(handler); - Mockito.when(dispatcher.dispatch(any(), any())).thenCallRealMethod(); - Mockito.when(dispatcher.getContainer(anyLong())).thenReturn( - Mockito.mock(KeyValueContainer.class)); - Mockito.when(dispatcher.getMissingContainerSet()) - .thenReturn(new HashSet<>()); - Mockito.when(handler.handle(any(), any(), any())).thenCallRealMethod(); - doCallRealMethod().when(dispatcher).setMetricsForTesting(any()); - dispatcher.setMetricsForTesting(Mockito.mock(ContainerMetrics.class)); - Mockito.when(dispatcher.buildAuditMessageForFailure(any(), any(), any())) - .thenCallRealMethod(); - Mockito.when(dispatcher.buildAuditMessageForSuccess(any(), any())) - .thenCallRealMethod(); - } - - @Test - /** - * Test that Handler handles different command types correctly. 
- */ - public void testHandlerCommandHandling() throws Exception { - - // Test Create Container Request handling - ContainerCommandRequestProto createContainerRequest = - ContainerProtos.ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.CreateContainer) - .setContainerID(DUMMY_CONTAINER_ID) - .setDatanodeUuid(DATANODE_UUID) - .setCreateContainer(ContainerProtos.CreateContainerRequestProto - .getDefaultInstance()) - .build(); - DispatcherContext context = new DispatcherContext.Builder().build(); - dispatcher.dispatch(createContainerRequest, context); - Mockito.verify(handler, times(1)).handleCreateContainer( - any(ContainerCommandRequestProto.class), any()); - - // Test Read Container Request handling - ContainerCommandRequestProto readContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ReadContainer); - dispatcher.dispatch(readContainerRequest, context); - Mockito.verify(handler, times(1)).handleReadContainer( - any(ContainerCommandRequestProto.class), any()); - - // Test Update Container Request handling - ContainerCommandRequestProto updateContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.UpdateContainer); - dispatcher.dispatch(updateContainerRequest, context); - Mockito.verify(handler, times(1)).handleUpdateContainer( - any(ContainerCommandRequestProto.class), any()); - - // Test Delete Container Request handling - ContainerCommandRequestProto deleteContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.DeleteContainer); - dispatcher.dispatch(deleteContainerRequest, null); - Mockito.verify(handler, times(1)).handleDeleteContainer( - any(ContainerCommandRequestProto.class), any()); - - // Test List Container Request handling - ContainerCommandRequestProto listContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ListContainer); - dispatcher.dispatch(listContainerRequest, context); - Mockito.verify(handler, times(1)).handleUnsupportedOp( - any(ContainerCommandRequestProto.class)); - - // Test Close Container Request handling - ContainerCommandRequestProto closeContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.CloseContainer); - dispatcher.dispatch(closeContainerRequest, context); - Mockito.verify(handler, times(1)).handleCloseContainer( - any(ContainerCommandRequestProto.class), any()); - - // Test Put Block Request handling - ContainerCommandRequestProto putBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.PutBlock); - dispatcher.dispatch(putBlockRequest, context); - Mockito.verify(handler, times(1)).handlePutBlock( - any(ContainerCommandRequestProto.class), any(), any()); - - // Test Get Block Request handling - ContainerCommandRequestProto getBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.GetBlock); - dispatcher.dispatch(getBlockRequest, context); - Mockito.verify(handler, times(1)).handleGetBlock( - any(ContainerCommandRequestProto.class), any()); - - // Test Delete Block Request handling - ContainerCommandRequestProto deleteBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.DeleteBlock); - dispatcher.dispatch(deleteBlockRequest, context); - Mockito.verify(handler, times(1)).handleDeleteBlock( - any(ContainerCommandRequestProto.class), any()); - - // Test List Block Request handling - ContainerCommandRequestProto listBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ListBlock); - dispatcher.dispatch(listBlockRequest, context); - Mockito.verify(handler, times(2)).handleUnsupportedOp( - 
any(ContainerCommandRequestProto.class)); - - // Test Read Chunk Request handling - ContainerCommandRequestProto readChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ReadChunk); - dispatcher.dispatch(readChunkRequest, context); - Mockito.verify(handler, times(1)).handleReadChunk( - any(ContainerCommandRequestProto.class), any(), any()); - - // Test Delete Chunk Request handling - ContainerCommandRequestProto deleteChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.DeleteChunk); - dispatcher.dispatch(deleteChunkRequest, context); - Mockito.verify(handler, times(1)).handleDeleteChunk( - any(ContainerCommandRequestProto.class), any()); - - // Test Write Chunk Request handling - ContainerCommandRequestProto writeChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.WriteChunk); - dispatcher.dispatch(writeChunkRequest, context); - Mockito.verify(handler, times(1)).handleWriteChunk( - any(ContainerCommandRequestProto.class), any(), any()); - - // Test List Chunk Request handling - ContainerCommandRequestProto listChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ListChunk); - dispatcher.dispatch(listChunkRequest, context); - Mockito.verify(handler, times(3)).handleUnsupportedOp( - any(ContainerCommandRequestProto.class)); - - // Test Put Small File Request handling - ContainerCommandRequestProto putSmallFileRequest = - getDummyCommandRequestProto(ContainerProtos.Type.PutSmallFile); - dispatcher.dispatch(putSmallFileRequest, context); - Mockito.verify(handler, times(1)).handlePutSmallFile( - any(ContainerCommandRequestProto.class), any(), any()); - - // Test Get Small File Request handling - ContainerCommandRequestProto getSmallFileRequest = - getDummyCommandRequestProto(ContainerProtos.Type.GetSmallFile); - dispatcher.dispatch(getSmallFileRequest, context); - Mockito.verify(handler, times(1)).handleGetSmallFile( - any(ContainerCommandRequestProto.class), any()); - } - - @Test - public void testVolumeSetInKeyValueHandler() throws Exception{ - File path = GenericTestUtils.getRandomizedTestDir(); - Configuration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath()); - VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); - try { - ContainerSet cset = new ContainerSet(); - int[] interval = new int[1]; - interval[0] = 2; - ContainerMetrics metrics = new ContainerMetrics(interval); - DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class); - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()) - .thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - KeyValueHandler keyValueHandler = new KeyValueHandler(conf, context, cset, - volumeSet, metrics); - assertEquals("org.apache.hadoop.ozone.container.common" + - ".volume.RoundRobinVolumeChoosingPolicy", - keyValueHandler.getVolumeChoosingPolicyForTesting() - .getClass().getName()); - - //Set a class which is not of sub class of VolumeChoosingPolicy - conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, - "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); - try { - new KeyValueHandler(conf, context, cset, volumeSet, metrics); - } catch (RuntimeException ex) { - GenericTestUtils.assertExceptionContains("class org.apache.hadoop" + - ".ozone.container.common.impl.HddsDispatcher not org.apache" + - 
".hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy", - ex); - } - } finally { - volumeSet.shutdown(); - FileUtil.fullyDelete(path); - } - } - - private ContainerCommandRequestProto getDummyCommandRequestProto( - ContainerProtos.Type cmdType) { - ContainerCommandRequestProto request = - ContainerProtos.ContainerCommandRequestProto.newBuilder() - .setCmdType(cmdType) - .setContainerID(DUMMY_CONTAINER_ID) - .setDatanodeUuid(DATANODE_UUID) - .build(); - - return request; - } - - - @Test - public void testCloseInvalidContainer() throws IOException { - long containerID = 1234L; - Configuration conf = new Configuration(); - KeyValueContainerData kvData = new KeyValueContainerData(containerID, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - KeyValueContainer container = new KeyValueContainer(kvData, conf); - kvData.setState(ContainerProtos.ContainerDataProto.State.INVALID); - - // Create Close container request - ContainerCommandRequestProto closeContainerRequest = - ContainerProtos.ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.CloseContainer) - .setContainerID(DUMMY_CONTAINER_ID) - .setDatanodeUuid(DATANODE_UUID) - .setCloseContainer(ContainerProtos.CloseContainerRequestProto - .getDefaultInstance()) - .build(); - dispatcher.dispatch(closeContainerRequest, null); - - Mockito.when(handler.handleCloseContainer(any(), any())) - .thenCallRealMethod(); - doCallRealMethod().when(handler).closeContainer(any()); - // Closing invalid container should return error response. - ContainerProtos.ContainerCommandResponseProto response = - handler.handleCloseContainer(closeContainerRequest, container); - - Assert.assertTrue("Close container should return Invalid container error", - response.getResult().equals( - ContainerProtos.Result.INVALID_CONTAINER_STATE)); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java deleted file mode 100644 index e3ae56a3aa86d..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.junit.Assert; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.UUID; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_UNHEALTHY; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - - -/** - * Test that KeyValueHandler fails certain operations when the - * container is unhealthy. - */ -public class TestKeyValueHandlerWithUnhealthyContainer { - public static final Logger LOG = LoggerFactory.getLogger( - TestKeyValueHandlerWithUnhealthyContainer.class); - - private final static String DATANODE_UUID = UUID.randomUUID().toString(); - private static final long DUMMY_CONTAINER_ID = 9999; - - @Test - public void testRead() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleReadContainer( - getDummyCommandRequestProto(ContainerProtos.Type.ReadContainer), - container); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - @Test - public void testGetBlock() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleGetBlock( - getDummyCommandRequestProto(ContainerProtos.Type.GetBlock), - container); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - @Test - public void testGetCommittedBlockLength() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleGetCommittedBlockLength( - getDummyCommandRequestProto( - ContainerProtos.Type.GetCommittedBlockLength), - container); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - @Test - public void testReadChunk() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleReadChunk( - getDummyCommandRequestProto( - ContainerProtos.Type.ReadChunk), - container, null); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - @Test - 
public void testDeleteChunk() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleDeleteChunk( - getDummyCommandRequestProto( - ContainerProtos.Type.DeleteChunk), - container); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - @Test - public void testGetSmallFile() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleGetSmallFile( - getDummyCommandRequestProto( - ContainerProtos.Type.GetSmallFile), - container); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - // -- Helper methods below. - - private KeyValueHandler getDummyHandler() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - DatanodeDetails dnDetails = DatanodeDetails.newBuilder() - .setUuid(DATANODE_UUID) - .setHostName("dummyHost") - .setIpAddress("1.2.3.4") - .build(); - DatanodeStateMachine stateMachine = mock(DatanodeStateMachine.class); - when(stateMachine.getDatanodeDetails()).thenReturn(dnDetails); - - StateContext context = new StateContext( - conf, DatanodeStateMachine.DatanodeStates.RUNNING, - stateMachine); - - return new KeyValueHandler( - new OzoneConfiguration(), - context, - mock(ContainerSet.class), - mock(VolumeSet.class), - mock(ContainerMetrics.class)); - } - - private KeyValueContainer getMockUnhealthyContainer() { - KeyValueContainerData containerData = mock(KeyValueContainerData.class); - when(containerData.getState()).thenReturn( - ContainerProtos.ContainerDataProto.State.UNHEALTHY); - return new KeyValueContainer(containerData, new OzoneConfiguration()); - } - - /** - * Construct fake protobuf messages for various types of requests. - * This is tedious, however necessary to test. Protobuf classes are final - * and cannot be mocked by Mockito. - * - * @param cmdType type of the container command. 
- * @return - */ - private ContainerCommandRequestProto getDummyCommandRequestProto( - ContainerProtos.Type cmdType) { - final ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder() - .setCmdType(cmdType) - .setContainerID(DUMMY_CONTAINER_ID) - .setDatanodeUuid(DATANODE_UUID); - - final ContainerProtos.DatanodeBlockID fakeBlockId = - ContainerProtos.DatanodeBlockID.newBuilder() - .setContainerID(DUMMY_CONTAINER_ID).setLocalID(1).build(); - - final ContainerProtos.ChunkInfo fakeChunkInfo = - ContainerProtos.ChunkInfo.newBuilder() - .setChunkName("dummy") - .setOffset(0) - .setLen(100) - .setChecksumData(ContainerProtos.ChecksumData.newBuilder() - .setBytesPerChecksum(1) - .setType(ContainerProtos.ChecksumType.CRC32) - .build()) - .build(); - - switch (cmdType) { - case ReadContainer: - builder.setReadContainer( - ContainerProtos.ReadContainerRequestProto.newBuilder().build()); - break; - case GetBlock: - builder.setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder() - .setBlockID(fakeBlockId).build()); - break; - case GetCommittedBlockLength: - builder.setGetCommittedBlockLength( - ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder() - .setBlockID(fakeBlockId).build()); - case ReadChunk: - builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder() - .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build()); - break; - case DeleteChunk: - builder - .setDeleteChunk(ContainerProtos.DeleteChunkRequestProto.newBuilder() - .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build()); - break; - case GetSmallFile: - builder - .setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder() - .setBlock(ContainerProtos.GetBlockRequestProto.newBuilder() - .setBlockID(fakeBlockId) - .build()) - .build()); - break; - - default: - Assert.fail("Unhandled request type " + cmdType + " in unit test"); - } - - return builder.build(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java deleted file mode 100644 index 9e6f653e7eb24..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileWriter; -import java.io.IOException; -import java.nio.charset.Charset; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker; - -import org.apache.commons.compress.archivers.tar.TarArchiveEntry; -import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; -import org.apache.commons.compress.compressors.CompressorException; -import org.apache.commons.compress.compressors.CompressorInputStream; -import org.apache.commons.compress.compressors.CompressorStreamFactory; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import static java.nio.charset.StandardCharsets.UTF_8; - -/** - * Test the tar/untar for a given container. - */ -public class TestTarContainerPacker { - - private static final String TEST_DB_FILE_NAME = "test1"; - - private static final String TEST_DB_FILE_CONTENT = "test1"; - - private static final String TEST_CHUNK_FILE_NAME = "chunk1"; - - private static final String TEST_CHUNK_FILE_CONTENT = "This is a chunk"; - - private static final String TEST_DESCRIPTOR_FILE_CONTENT = "descriptor"; - - private ContainerPacker packer = new TarContainerPacker(); - - private static final Path SOURCE_CONTAINER_ROOT = - Paths.get("target/test/data/packer-source-dir"); - - private static final Path DEST_CONTAINER_ROOT = - Paths.get("target/test/data/packer-dest-dir"); - - @BeforeClass - public static void init() throws IOException { - initDir(SOURCE_CONTAINER_ROOT); - initDir(DEST_CONTAINER_ROOT); - } - - private static void initDir(Path path) throws IOException { - if (path.toFile().exists()) { - FileUtils.deleteDirectory(path.toFile()); - } - path.toFile().mkdirs(); - } - - private KeyValueContainerData createContainer(long id, Path dir, - OzoneConfiguration conf) throws IOException { - - Path containerDir = dir.resolve("container" + id); - Path dbDir = containerDir.resolve("db"); - Path dataDir = containerDir.resolve("data"); - Files.createDirectories(dbDir); - Files.createDirectories(dataDir); - - KeyValueContainerData containerData = new KeyValueContainerData( - id, -1, UUID.randomUUID().toString(), UUID.randomUUID().toString()); - containerData.setChunksPath(dataDir.toString()); - containerData.setMetadataPath(dbDir.getParent().toString()); - containerData.setDbFile(dbDir.toFile()); - - - return containerData; - } - - @Test - public void pack() throws IOException, CompressorException { - - //GIVEN - OzoneConfiguration conf = new OzoneConfiguration(); - - KeyValueContainerData sourceContainerData = - createContainer(1L, SOURCE_CONTAINER_ROOT, conf); - - KeyValueContainer sourceContainer = - new KeyValueContainer(sourceContainerData, conf); - - //sample db file in the metadata directory - try (FileWriter writer = new FileWriter( - sourceContainerData.getDbFile().toPath() - .resolve(TEST_DB_FILE_NAME) - .toFile())) { - IOUtils.write(TEST_DB_FILE_CONTENT, writer); - } - - //sample chunk file in the chunk directory - try (FileWriter writer = new FileWriter( - Paths.get(sourceContainerData.getChunksPath()) - .resolve(TEST_CHUNK_FILE_NAME) - .toFile())) { - 
IOUtils.write(TEST_CHUNK_FILE_CONTENT, writer); - } - - //sample container descriptor file - try (FileWriter writer = new FileWriter( - sourceContainer.getContainerFile())) { - IOUtils.write(TEST_DESCRIPTOR_FILE_CONTENT, writer); - } - - Path targetFile = - SOURCE_CONTAINER_ROOT.getParent().resolve("container.tar.gz"); - - //WHEN: pack it - try (FileOutputStream output = new FileOutputStream(targetFile.toFile())) { - packer.pack(sourceContainer, output); - } - - //THEN: check the result - try (FileInputStream input = new FileInputStream(targetFile.toFile())) { - CompressorInputStream uncompressed = new CompressorStreamFactory() - .createCompressorInputStream(CompressorStreamFactory.GZIP, input); - TarArchiveInputStream tarStream = new TarArchiveInputStream(uncompressed); - - TarArchiveEntry entry; - Map entries = new HashMap<>(); - while ((entry = tarStream.getNextTarEntry()) != null) { - entries.put(entry.getName(), entry); - } - - Assert.assertTrue( - entries.containsKey("container.yaml")); - - } - - //read the container descriptor only - try (FileInputStream input = new FileInputStream(targetFile.toFile())) { - String containerYaml = new String(packer.unpackContainerDescriptor(input), - Charset.forName(UTF_8.name())); - Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, containerYaml); - } - - KeyValueContainerData destinationContainerData = - createContainer(2L, DEST_CONTAINER_ROOT, conf); - - KeyValueContainer destinationContainer = - new KeyValueContainer(destinationContainerData, conf); - - String descriptor = ""; - - //unpackContainerData - try (FileInputStream input = new FileInputStream(targetFile.toFile())) { - descriptor = - new String(packer.unpackContainerData(destinationContainer, input), - Charset.forName(UTF_8.name())); - } - - assertExampleMetadataDbIsGood( - destinationContainerData.getDbFile().toPath()); - assertExampleChunkFileIsGood( - Paths.get(destinationContainerData.getChunksPath())); - Assert.assertFalse( - "Descriptor file should not been exctarcted by the " - + "unpackContainerData Call", - destinationContainer.getContainerFile().exists()); - Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, descriptor); - - } - - - private void assertExampleMetadataDbIsGood(Path dbPath) - throws IOException { - - Path dbFile = dbPath.resolve(TEST_DB_FILE_NAME); - - Assert.assertTrue( - "example DB file is missing after pack/unpackContainerData: " + dbFile, - Files.exists(dbFile)); - - try (FileInputStream testFile = new FileInputStream(dbFile.toFile())) { - List strings = IOUtils - .readLines(testFile, Charset.forName(UTF_8.name())); - Assert.assertEquals(1, strings.size()); - Assert.assertEquals(TEST_DB_FILE_CONTENT, strings.get(0)); - } - } - - private void assertExampleChunkFileIsGood(Path chunkDirPath) - throws IOException { - - Path chunkFile = chunkDirPath.resolve(TEST_CHUNK_FILE_NAME); - - Assert.assertTrue( - "example chunk file is missing after pack/unpackContainerData: " - + chunkFile, - Files.exists(chunkFile)); - - try (FileInputStream testFile = new FileInputStream(chunkFile.toFile())) { - List strings = IOUtils - .readLines(testFile, Charset.forName(UTF_8.name())); - Assert.assertEquals(1, strings.size()); - Assert.assertEquals(TEST_CHUNK_FILE_CONTENT, strings.get(0)); - } - } - -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java deleted file 
mode 100644 index 4a1637cb16914..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -/** - * Tests for {@link ChunkUtils}. 
- */ -public class TestChunkUtils { - - private static final Logger LOG = - LoggerFactory.getLogger(TestChunkUtils.class); - - private static final String PREFIX = TestChunkUtils.class.getSimpleName(); - - @Test - public void concurrentReadOfSameFile() throws Exception { - String s = "Hello World"; - byte[] array = s.getBytes(); - ByteBuffer data = ByteBuffer.wrap(array); - Path tempFile = Files.createTempFile(PREFIX, "concurrent"); - try { - ChunkInfo chunkInfo = new ChunkInfo(tempFile.toString(), - 0, data.capacity()); - File file = tempFile.toFile(); - VolumeIOStats stats = new VolumeIOStats(); - ChunkUtils.writeData(file, chunkInfo, data, stats, true); - int threads = 10; - ExecutorService executor = new ThreadPoolExecutor(threads, threads, - 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); - AtomicInteger processed = new AtomicInteger(); - AtomicBoolean failed = new AtomicBoolean(); - for (int i = 0; i < threads; i++) { - final int threadNumber = i; - executor.submit(() -> { - try { - ByteBuffer readBuffer = ChunkUtils.readData(file, chunkInfo, stats); - LOG.info("Read data ({}): {}", threadNumber, - new String(readBuffer.array())); - if (!Arrays.equals(array, readBuffer.array())) { - failed.set(true); - } - } catch (Exception e) { - LOG.error("Failed to read data ({})", threadNumber, e); - failed.set(true); - } - processed.incrementAndGet(); - }); - } - try { - GenericTestUtils.waitFor(() -> processed.get() == threads, - 100, (int) TimeUnit.SECONDS.toMillis(5)); - } finally { - executor.shutdownNow(); - } - assertEquals(threads * stats.getWriteBytes(), stats.getReadBytes()); - assertFalse(failed.get()); - } finally { - Files.deleteIfExists(tempFile); - } - } - - @Test - public void concurrentProcessing() throws Exception { - final int perThreadWait = 1000; - final int maxTotalWait = 5000; - int threads = 20; - List paths = new LinkedList<>(); - - try { - ExecutorService executor = new ThreadPoolExecutor(threads, threads, - 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); - AtomicInteger processed = new AtomicInteger(); - for (int i = 0; i < threads; i++) { - Path path = Files.createTempFile(PREFIX, String.valueOf(i)); - paths.add(path); - executor.submit(() -> { - ChunkUtils.processFileExclusively(path, () -> { - try { - Thread.sleep(perThreadWait); - } catch (InterruptedException e) { - e.printStackTrace(); - } - processed.incrementAndGet(); - return null; - }); - }); - } - try { - GenericTestUtils.waitFor(() -> processed.get() == threads, - 100, maxTotalWait); - } finally { - executor.shutdownNow(); - } - } finally { - for (Path path : paths) { - FileUtils.deleteQuietly(path.toFile()); - } - } - } - - @Test - public void serialRead() throws Exception { - String s = "Hello World"; - byte[] array = s.getBytes(); - ByteBuffer data = ByteBuffer.wrap(array); - Path tempFile = Files.createTempFile(PREFIX, "serial"); - try { - ChunkInfo chunkInfo = new ChunkInfo(tempFile.toString(), - 0, data.capacity()); - File file = tempFile.toFile(); - VolumeIOStats stats = new VolumeIOStats(); - ChunkUtils.writeData(file, chunkInfo, data, stats, true); - ByteBuffer readBuffer = ChunkUtils.readData(file, chunkInfo, stats); - assertArrayEquals(array, readBuffer.array()); - assertEquals(stats.getWriteBytes(), stats.getReadBytes()); - } catch (Exception e) { - LOG.error("Failed to read data", e); - } finally { - Files.deleteIfExists(tempFile); - } - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java deleted file mode 100644 index afbf274a8fe97..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Chunk Manager Checks. - */ -package org.apache.hadoop.ozone.container.keyvalue; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java deleted file mode 100644 index b9b1beabdbd11..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.Arrays; -import java.util.Collection; - -/** - * This test verifies the container scrubber metrics functionality. - */ -public class TestContainerScrubberMetrics { - @Test - public void testContainerMetaDataScrubberMetrics() { - OzoneConfiguration conf = new OzoneConfiguration(); - ContainerScrubberConfiguration c = conf.getObject( - ContainerScrubberConfiguration.class); - c.setMetadataScanInterval(0); - HddsVolume vol = Mockito.mock(HddsVolume.class); - ContainerController cntrl = mockContainerController(vol); - - ContainerMetadataScanner mc = new ContainerMetadataScanner(c, cntrl); - mc.runIteration(); - - Assert.assertEquals(1, mc.getMetrics().getNumScanIterations()); - Assert.assertEquals(3, mc.getMetrics().getNumContainersScanned()); - Assert.assertEquals(1, mc.getMetrics().getNumUnHealthyContainers()); - } - - @Test - public void testContainerDataScrubberMetrics() { - OzoneConfiguration conf = new OzoneConfiguration(); - ContainerScrubberConfiguration c = conf.getObject( - ContainerScrubberConfiguration.class); - c.setDataScanInterval(0); - HddsVolume vol = Mockito.mock(HddsVolume.class); - ContainerController cntrl = mockContainerController(vol); - - ContainerDataScanner sc = new ContainerDataScanner(c, cntrl, vol); - sc.runIteration(); - - ContainerDataScrubberMetrics m = sc.getMetrics(); - Assert.assertEquals(1, m.getNumScanIterations()); - Assert.assertEquals(2, m.getNumContainersScanned()); - Assert.assertEquals(1, m.getNumUnHealthyContainers()); - } - - private ContainerController mockContainerController(HddsVolume vol) { - // healthy container - Container c1 = Mockito.mock(Container.class); - Mockito.when(c1.shouldScanData()).thenReturn(true); - Mockito.when(c1.scanMetaData()).thenReturn(true); - Mockito.when(c1.scanData( - Mockito.any(DataTransferThrottler.class), - Mockito.any(Canceler.class))).thenReturn(true); - - // unhealthy container (corrupt data) - ContainerData c2d = Mockito.mock(ContainerData.class); - Mockito.when(c2d.getContainerID()).thenReturn(101L); - Container c2 = Mockito.mock(Container.class); - Mockito.when(c2.scanMetaData()).thenReturn(true); - Mockito.when(c2.shouldScanData()).thenReturn(true); - Mockito.when(c2.scanData( - Mockito.any(DataTransferThrottler.class), - Mockito.any(Canceler.class))).thenReturn(false); - Mockito.when(c2.getContainerData()).thenReturn(c2d); - - // unhealthy container (corrupt metadata) - ContainerData c3d = Mockito.mock(ContainerData.class); - Mockito.when(c3d.getContainerID()).thenReturn(102L); - Container c3 = Mockito.mock(Container.class); - Mockito.when(c3.shouldScanData()).thenReturn(false); - Mockito.when(c3.scanMetaData()).thenReturn(false); - 
Mockito.when(c3.getContainerData()).thenReturn(c3d); - - Collection> containers = Arrays.asList(c1, c2, c3); - ContainerController cntrl = Mockito.mock(ContainerController.class); - Mockito.when(cntrl.getContainers(vol)) - .thenReturn(containers.iterator()); - Mockito.when(cntrl.getContainers()) - .thenReturn(containers.iterator()); - - return cntrl; - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java deleted file mode 100644 index 2d679a1cb45db..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Random; -import java.util.UUID; -import java.util.HashMap; -import java.util.List; -import java.util.ArrayList; - -import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DISK_OUT_OF_SPACE; -import static org.junit.Assert.assertEquals; - -/** - * This class is used to test OzoneContainer. - */ -public class TestOzoneContainer { - - private static final Logger LOG = - LoggerFactory.getLogger(TestOzoneContainer.class); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OzoneConfiguration conf; - private String scmId = UUID.randomUUID().toString(); - private VolumeSet volumeSet; - private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private KeyValueContainerData keyValueContainerData; - private KeyValueContainer keyValueContainer; - private final DatanodeDetails datanodeDetails = createDatanodeDetails(); - private HashMap commitSpaceMap; //RootDir -> committed space - private final int numTestContainers = 10; - - @Before - public void setUp() throws Exception { - conf = new OzoneConfiguration(); - conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.getRoot() - .getAbsolutePath()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - folder.newFolder().getAbsolutePath()); - commitSpaceMap = new HashMap(); - volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf); - volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); - } - - @After - public void cleanUp() throws Exception { - if (volumeSet != null) { - volumeSet.shutdown(); - volumeSet = null; - } - } - - @Test - public void testBuildContainerMap() throws Exception { - // Format the volumes - for (HddsVolume volume : volumeSet.getVolumesList()) { - volume.format(UUID.randomUUID().toString()); - commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0)); - } - - // Add containers to disk - for (int i = 0; i < numTestContainers; i++) { - long freeBytes = 0; - long volCommitBytes; - long maxCap = (long) StorageUnit.GB.toBytes(1); - - HddsVolume myVolume; - - keyValueContainerData = new KeyValueContainerData(i, - maxCap, UUID.randomUUID().toString(), - datanodeDetails.getUuidString()); - keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - myVolume = keyValueContainer.getContainerData().getVolume(); - - freeBytes = addBlocks(keyValueContainer, 2, 3); - - // update our expectation of volume committed space in the map - volCommitBytes = commitSpaceMap.get(getVolumeKey(myVolume)).longValue(); - Preconditions.checkState(freeBytes >= 0); - commitSpaceMap.put(getVolumeKey(myVolume), - Long.valueOf(volCommitBytes + freeBytes)); - } - - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - // When OzoneContainer is started, the containers from disk should be - // loaded into the containerSet. - // Also expected to initialize committed space for each volume. 
- OzoneContainer ozoneContainer = new - OzoneContainer(datanodeDetails, conf, context, null); - - ContainerSet containerset = ozoneContainer.getContainerSet(); - assertEquals(numTestContainers, containerset.containerCount()); - - verifyCommittedSpace(ozoneContainer); - } - - @Test - public void testContainerCreateDiskFull() throws Exception { - long containerSize = (long) StorageUnit.MB.toBytes(100); - - // Format the volumes - for (HddsVolume volume : volumeSet.getVolumesList()) { - volume.format(UUID.randomUUID().toString()); - - // eat up all available space except size of 1 container - volume.incCommittedBytes(volume.getAvailable() - containerSize); - // eat up 10 bytes more, now available space is less than 1 container - volume.incCommittedBytes(10); - } - keyValueContainerData = new KeyValueContainerData(99, containerSize, - UUID.randomUUID().toString(), datanodeDetails.getUuidString()); - keyValueContainer = new KeyValueContainer(keyValueContainerData, conf); - - // we expect an out of space Exception - StorageContainerException e = LambdaTestUtils.intercept( - StorageContainerException.class, - () -> keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId) - ); - if (!DISK_OUT_OF_SPACE.equals(e.getResult())) { - LOG.info("Unexpected error during container creation", e); - } - assertEquals(DISK_OUT_OF_SPACE, e.getResult()); - } - - //verify committed space on each volume - private void verifyCommittedSpace(OzoneContainer oc) { - for (HddsVolume dnVol : oc.getVolumeSet().getVolumesList()) { - String key = getVolumeKey(dnVol); - long expectedCommit = commitSpaceMap.get(key).longValue(); - long volumeCommitted = dnVol.getCommittedBytes(); - assertEquals("Volume committed space not initialized correctly", - expectedCommit, volumeCommitted); - } - } - - private long addBlocks(KeyValueContainer container, - int blocks, int chunksPerBlock) throws Exception { - String strBlock = "block"; - String strChunk = "-chunkFile"; - int datalen = 65536; - long usedBytes = 0; - - long freeBytes = container.getContainerData().getMaxSize(); - long containerId = container.getContainerData().getContainerID(); - ReferenceCountedDB db = BlockUtils.getDB(container - .getContainerData(), conf); - - for (int bi = 0; bi < blocks; bi++) { - // Creating BlockData - BlockID blockID = new BlockID(containerId, bi); - BlockData blockData = new BlockData(blockID); - List chunkList = new ArrayList<>(); - - chunkList.clear(); - for (int ci = 0; ci < chunksPerBlock; ci++) { - String chunkName = strBlock + bi + strChunk + ci; - long offset = ci * datalen; - ChunkInfo info = new ChunkInfo(chunkName, offset, datalen); - usedBytes += datalen; - chunkList.add(info.getProtoBufMessage()); - } - blockData.setChunks(chunkList); - db.getStore().put(Longs.toByteArray(blockID.getLocalID()), - blockData.getProtoBufMessage().toByteArray()); - } - - // remaining available capacity of the container - return (freeBytes - usedBytes); - } - - private String getVolumeKey(HddsVolume volume) { - return volume.getHddsRootDir().getPath(); - } - - private DatanodeDetails createDatanodeDetails() { - Random random = new Random(); - String ipAddress = - random.nextInt(256) + "." + random.nextInt(256) + "." + random - .nextInt(256) + "." 
+ random.nextInt(256); - - String uuid = UUID.randomUUID().toString(); - String hostName = uuid; - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(uuid) - .setHostName("localhost") - .setIpAddress(ipAddress) - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort); - return builder.build(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java deleted file mode 100644 index c3d3b17aefa51..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.replication; - -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; - -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Test the replication supervisor. 
- */ -public class TestReplicationSupervisor { - - private OzoneConfiguration conf = new OzoneConfiguration(); - - @Test - public void normal() throws Exception { - //GIVEN - ContainerSet set = new ContainerSet(); - - FakeReplicator replicator = new FakeReplicator(set); - ReplicationSupervisor supervisor = - new ReplicationSupervisor(set, replicator, 5); - - List datanodes = IntStream.range(1, 3) - .mapToObj(v -> Mockito.mock(DatanodeDetails.class)) - .collect(Collectors.toList()); - - try { - //WHEN - supervisor.addTask(new ReplicationTask(1L, datanodes)); - supervisor.addTask(new ReplicationTask(1L, datanodes)); - supervisor.addTask(new ReplicationTask(1L, datanodes)); - supervisor.addTask(new ReplicationTask(2L, datanodes)); - supervisor.addTask(new ReplicationTask(2L, datanodes)); - supervisor.addTask(new ReplicationTask(3L, datanodes)); - //THEN - LambdaTestUtils.await(200_000, 1000, - () -> supervisor.getInFlightReplications() == 0); - - Assert.assertEquals(3, replicator.replicated.size()); - - } finally { - supervisor.stop(); - } - } - - @Test - public void duplicateMessageAfterAWhile() throws Exception { - //GIVEN - ContainerSet set = new ContainerSet(); - - FakeReplicator replicator = new FakeReplicator(set); - ReplicationSupervisor supervisor = - new ReplicationSupervisor(set, replicator, 2); - - List datanodes = IntStream.range(1, 3) - .mapToObj(v -> Mockito.mock(DatanodeDetails.class)) - .collect(Collectors.toList()); - - try { - //WHEN - supervisor.addTask(new ReplicationTask(1L, datanodes)); - LambdaTestUtils.await(200_000, 1000, - () -> supervisor.getInFlightReplications() == 0); - supervisor.addTask(new ReplicationTask(1L, datanodes)); - LambdaTestUtils.await(200_000, 1000, - () -> supervisor.getInFlightReplications() == 0); - - //THEN - System.out.println(replicator.replicated.get(0)); - - Assert.assertEquals(1, replicator.replicated.size()); - - } finally { - supervisor.stop(); - } - } - - private class FakeReplicator implements ContainerReplicator { - - private List replicated = new ArrayList<>(); - - private ContainerSet containerSet; - - FakeReplicator(ContainerSet set) { - this.containerSet = set; - } - - @Override - public void replicate(ReplicationTask task) { - KeyValueContainerData kvcd = - new KeyValueContainerData(task.getContainerId(), 100L, - UUID.randomUUID().toString(), UUID.randomUUID().toString()); - KeyValueContainer kvc = - new KeyValueContainer(kvcd, conf); - try { - //download is slow - Thread.sleep(100); - replicated.add(task); - containerSet.addContainer(kvc); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java deleted file mode 100644 index 5c905e02870f0..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for the container replication. - */ -package org.apache.hadoop.ozone.container.replication; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java deleted file mode 100644 index a136983415b7d..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.keyvalue.statemachine.background - .BlockDeletingService; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; - -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * A test class implementation for {@link BlockDeletingService}. - */ -public class BlockDeletingServiceTestImpl - extends BlockDeletingService { - - // the service timeout - private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0; - - // tests only - private CountDownLatch latch; - private Thread testingThread; - private AtomicInteger numOfProcessed = new AtomicInteger(0); - - public BlockDeletingServiceTestImpl(OzoneContainer container, - int serviceInterval, Configuration conf) { - super(container, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS, - TimeUnit.MILLISECONDS, conf); - } - - @VisibleForTesting - public void runDeletingTasks() { - if (latch.getCount() > 0) { - this.latch.countDown(); - } else { - throw new IllegalStateException("Count already reaches zero"); - } - } - - @VisibleForTesting - public boolean isStarted() { - return latch != null && testingThread.isAlive(); - } - - public int getTimesOfProcessed() { - return numOfProcessed.get(); - } - - // Override the implementation to start a single on-call control thread. - @Override - public void start() { - PeriodicalTask svc = new PeriodicalTask(); - // In test mode, relies on a latch countdown to runDeletingTasks tasks. - Runnable r = () -> { - while (true) { - latch = new CountDownLatch(1); - try { - latch.await(); - } catch (InterruptedException e) { - break; - } - Future future = this.getExecutorService().submit(svc); - try { - // for tests, we only wait for 3s for completion - future.get(3, TimeUnit.SECONDS); - numOfProcessed.incrementAndGet(); - } catch (Exception e) { - return; - } - } - }; - - testingThread = new ThreadFactoryBuilder() - .setDaemon(true) - .build() - .newThread(r); - testingThread.start(); - } - - @Override - public void shutdown() { - testingThread.interrupt(); - super.shutdown(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java deleted file mode 100644 index 4e8a90bf1d42c..0000000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; -// Helper classes for ozone and container tests. \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/resources/additionalfields.container b/hadoop-hdds/container-service/src/test/resources/additionalfields.container deleted file mode 100644 index faaed06d2dcb9..0000000000000 --- a/hadoop-hdds/container-service/src/test/resources/additionalfields.container +++ /dev/null @@ -1,14 +0,0 @@ -! -containerDBType: RocksDB -chunksPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1 -containerID: 9223372036854775807 -containerType: KeyValueContainer -metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1 -layOutVersion: 1 -maxSize: 5368709120 -originPipelineId: 1297e8a9-2850-4ced-b96c-5ae31d2c73ad -originNodeId: 7f541a06-6c26-476d-9994-c6e1947e11cb -metadata: {OWNER: ozone, VOLUME: hdfs} -state: CLOSED -aclEnabled: true -checksum: 61db56da7d50798561b5365c123c5fbf7faf99fbbbd571a746af79020b7f79ba \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container b/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container deleted file mode 100644 index ce3294750c184..0000000000000 --- a/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container +++ /dev/null @@ -1,13 +0,0 @@ -! -containerDBType: RocksDB -chunksPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1 -containerID: 9223372036854775807 -containerType: KeyValueContainer -metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1 -layOutVersion: 1 -maxSize: 5368709120 -originPipelineId: 4d41dd20-6d73-496a-b247-4c6cb483f54e -originNodeId: 54842560-67a5-48a5-a7d4-4701d9538706 -metadata: {OWNER: ozone, VOLUME: hdfs} -state: OPEN -checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.container b/hadoop-hdds/container-service/src/test/resources/incorrect.container deleted file mode 100644 index 38384c8e6974c..0000000000000 --- a/hadoop-hdds/container-service/src/test/resources/incorrect.container +++ /dev/null @@ -1,13 +0,0 @@ -! -containerDBType: RocksDB -chunksPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1 -containerID: 9223372036854775807 -containerType: KeyValueContainer -metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1 -layOutVersion: 1 -maxSize: 5368709120 -originPipelineId: b2c96aa4-b757-4f97-b286-6fb80a1baf8e -originNodeId: 6dcfb385-caea-4efb-9ef3-f87fadca0f51 -metadata: {OWNER: ozone, VOLUME: hdfs} -state: NO_SUCH_STATE -checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/resources/log4j.properties b/hadoop-hdds/container-service/src/test/resources/log4j.properties deleted file mode 100644 index bb5cbe5ec321f..0000000000000 --- a/hadoop-hdds/container-service/src/test/resources/log4j.properties +++ /dev/null @@ -1,23 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# log4j configuration used during build and unit tests - -log4j.rootLogger=INFO,stdout -log4j.threshold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl b/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl deleted file mode 100644 index 7f2aedf8675f3..0000000000000 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl +++ /dev/null @@ -1,189 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -

- [Deleted stylesheet content: an XHTML "CheckStyle Audit" report (designed for use with CheckStyle and Ant) with a Files list, per-file Error Description / Line detail, and a Files / Errors summary table.]
- - - - a - b - - - - - diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml deleted file mode 100644 index 1c437418ccfa3..0000000000000 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml +++ /dev/null @@ -1,196 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/hadoop-hdds/dev-support/checkstyle/suppressions.xml b/hadoop-hdds/dev-support/checkstyle/suppressions.xml deleted file mode 100644 index 7bc94797df856..0000000000000 --- a/hadoop-hdds/dev-support/checkstyle/suppressions.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - - - diff --git a/hadoop-hdds/docs/README.md b/hadoop-hdds/docs/README.md deleted file mode 100644 index 8d5cdb714fd73..0000000000000 --- a/hadoop-hdds/docs/README.md +++ /dev/null @@ -1,55 +0,0 @@ - -# Hadoop Ozone/HDDS docs - -This subproject contains the inline documentation for Ozone/HDDS components. - -You can create a new page with: - -``` -hugo new content/title.md -``` - -You can check the rendering with: - -``` -hugo serve -``` - -This maven project will create the rendered HTML page during the build (ONLY if hugo is available). -And the dist project will include the documentation. - -You can adjust the menu hierarchy with adjusting the header of the markdown file: - -To show it in the main header add the menu entry: - -``` ---- -menu: main ---- -``` - -To show it as a subpage, you can set the parent. (The value could be the title of the parent page, -our you can defined an `id: ...` in the parent markdown and use that in the parent reference. - -``` ---- -menu: - main: - parent: "Getting started" ---- -``` diff --git a/hadoop-hdds/docs/archetypes/default.md b/hadoop-hdds/docs/archetypes/default.md deleted file mode 100644 index f4cc9998dc60f..0000000000000 --- a/hadoop-hdds/docs/archetypes/default.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "{{ replace .Name "-" " " | title }}" -menu: main ---- - diff --git a/hadoop-hdds/docs/config.yaml b/hadoop-hdds/docs/config.yaml deleted file mode 100644 index 7b75888fb2876..0000000000000 --- a/hadoop-hdds/docs/config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -languageCode: "en-us" -DefaultContentLanguage: "en" -title: "Ozone" -theme: "ozonedoc" -pygmentsCodeFences: true -uglyurls: true -relativeURLs: true -disableKinds: -- taxonomy -- taxonomyTerm \ No newline at end of file diff --git a/hadoop-hdds/docs/content/_index.md b/hadoop-hdds/docs/content/_index.md deleted file mode 100644 index bb1bf9a744e78..0000000000000 --- a/hadoop-hdds/docs/content/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Overview -menu: main -weight: -10 ---- - - -# Apache Hadoop Ozone - - - -*_Ozone is a scalable, redundant, and distributed object store for Hadoop.

-Apart from scaling to billions of objects of varying sizes, -Ozone can function effectively in containerized environments -like Kubernetes._*

- -Applications like Apache Spark, Hive and YARN, work without any modifications when using Ozone. Ozone comes with a [Java client library]({{< -ref "JavaApi.md" ->}}), [S3 protocol support] ({{< ref "S3.md" >}}), and a [command line interface] -({{< ref "shell/_index.md" >}}) which makes it easy to use Ozone. - -Ozone consists of volumes, buckets, and keys: - -* Volumes are similar to user accounts. Only administrators can create or delete volumes. -* Buckets are similar to directories. A bucket can contain any number of keys, but buckets cannot contain other buckets. -* Keys are similar to files. - - }}"> - diff --git a/hadoop-hdds/docs/content/beyond/Containers.md b/hadoop-hdds/docs/content/beyond/Containers.md deleted file mode 100644 index ea7e3b17c4377..0000000000000 --- a/hadoop-hdds/docs/content/beyond/Containers.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -title: "Ozone Containers" -summary: Ozone uses containers extensively for testing. This page documents the usage and best practices of Ozone. -weight: 2 ---- - - -Docker heavily is used at the ozone development with three principal use-cases: - -* __dev__: - * We use docker to start local pseudo-clusters (docker provides unified environment, but no image creation is required) -* __test__: - * We create docker images from the dev branches to test ozone in kubernetes and other container orchestrator system - * We provide _apache/ozone_ images for each release to make it easier for evaluation of Ozone. - These images are __not__ created __for production__ usage. - -

- -* __production__: - * We have documentation on how you can create your own docker image for your production cluster. - -Let's check out each of the use-cases in more detail: - -## Development - -Ozone artifact contains example docker-compose directories to make it easier to start Ozone cluster in your local machine. - -From distribution: - -```bash -cd compose/ozone -docker-compose up -d -``` - -After a local build: - -```bash -cd hadoop-ozone/dist/target/ozone-*/compose -docker-compose up -d -``` - -These environments are very important tools to start different type of Ozone clusters at any time. - -To be sure that the compose files are up-to-date, we also provide acceptance test suites which start -the cluster and check the basic behaviour. - -The acceptance tests are part of the distribution, and you can find the test definitions in `smoketest` directory. - -You can start the tests from any compose directory: - -For example: - -```bash -cd compose/ozone -./test.sh -``` - -### Implementation details - -`compose` tests are based on the apache/hadoop-runner docker image. The image itself does not contain -any Ozone jar file or binary just the helper scripts to start ozone. - -hadoop-runner provdes a fixed environment to run Ozone everywhere, but the ozone distribution itself -is mounted from the including directory: - -(Example docker-compose fragment) - -``` - scm: - image: apache/hadoop-runner:jdk11 - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - -``` - -The containers are configured based on environment variables, but because the same environment -variables should be set for each containers we maintain the list of the environment variables -in a separated file: - -``` - scm: - image: apache/hadoop-runner:jdk11 - #... - env_file: - - ./docker-config -``` - -The docker-config file contains the list of the required environment variables: - -``` -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -#... -``` - -As you can see we use naming convention. Based on the name of the environment variable, the -appropriate hadoop config XML (`ozone-site.xml` in our case) will be generated by a -[script](https://github.com/apache/hadoop/tree/docker-hadoop-runner-latest/scripts) which is -included in the `hadoop-runner` base image. - -The [entrypoint](https://github.com/apache/hadoop/blob/docker-hadoop-runner-latest/scripts/starter.sh) -of the `hadoop-runner` image contains a helper shell script which triggers this transformation and -can do additional actions (eg. initialize scm/om storage, download required keytabs, etc.) -based on environment variables. - -## Test/Staging - -The `docker-compose` based approach is recommended only for local test, not for multi node cluster. -To use containers on a multi-node cluster we need a Container Orchestrator like Kubernetes. - -Kubernetes example files are included in the `kubernetes` folder. - -*Please note*: all the provided images are based the `hadoop-runner` image which contains all the -required tool for testing in staging environments. For production we recommend to create your own, -hardened image with your own base image. - -### Test the release - -The release can be tested with deploying any of the example clusters: - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -Plese note that in this case the latest released container will be downloaded from the dockerhub. 
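If the apply succeeds, the example pods can be checked with standard kubectl commands. A quick sanity check might look like the sketch below; `scm-0` is an assumed pod name and will differ depending on which example directory was deployed:

```bash
# Verify the example Ozone pods come up; 'scm-0' is an illustrative pod name.
kubectl get pods
kubectl logs scm-0
```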
- -### Test the development build - -To test a development build you can create your own image and upload it to your own docker registry: - - -```bash -mvn clean install -f pom.ozone.xml -DskipTests -Pdocker-build,docker-push -Ddocker.image=myregistry:9000/name/ozone -``` - -The configured image will be used in all the generated kubernetes resources files (`image:` keys are adjusted during the build) - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -## Production - - - -You can use the source of our development images as an example: - - * [Base image] (https://github.com/apache/hadoop/blob/docker-hadoop-runner-jdk11/Dockerfile) - * [Docker image] (https://github.com/apache/hadoop/blob/trunk/hadoop-ozone/dist/src/main/docker/Dockerfile) - - Most of the elements are optional and just helper function but to use the provided example - kubernetes resources you may need the scripts from - [here](https://github.com/apache/hadoop/tree/docker-hadoop-runner-jdk11/scripts) - - * The two python scripts convert environment variables to real hadoop XML config files - * The start.sh executes the python scripts (and other initialization) based on environment variables. - -## Containers - -Ozone related container images and source locations: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| # | Container            | Repository                                    | Base          | Branch               | Tags                | Comments |
|---|----------------------|-----------------------------------------------|---------------|----------------------|---------------------|----------|
| 1 | apache/ozone         | https://github.com/apache/hadoop-docker-ozone | hadoop-runner | ozone-...            | 0.3.0, 0.4.0, 0.4.1 | For each Ozone release we create a new release tag. |
| 2 | apache/hadoop-runner | https://github.com/apache/hadoop              | centos        | docker-hadoop-runner | jdk11, jdk8, latest | This is the base image used for testing Hadoop Ozone. It is a set of utilities that make it easy for us to run Ozone. |
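As a footnote to the docker-config convention shown earlier on this page, each `OZONE-SITE.XML_key=value` environment variable becomes one property of the generated `ozone-site.xml`. The loop below is only a sketch of that mapping, assuming the variables are already present in the environment; the real conversion is performed by the helper scripts bundled in the `hadoop-runner` image:

```bash
#!/usr/bin/env bash
# Sketch only: render OZONE-SITE.XML_* environment variables into a minimal
# ozone-site.xml. The hadoop-runner image does the real conversion with its
# bundled helper scripts; this is just an illustration of the naming scheme.
{
  echo '<configuration>'
  env | grep '^OZONE-SITE\.XML_' | while IFS='=' read -r name value; do
    key="${name#OZONE-SITE.XML_}"
    printf '  <property><name>%s</name><value>%s</value></property>\n' "$key" "$value"
  done
  echo '</configuration>'
} > ozone-site.xml
```

Invoked as `env 'OZONE-SITE.XML_ozone.om.address=om' bash render.sh` (the script name is hypothetical), it would emit a single `ozone.om.address` property, mirroring what the containers receive through `env_file`.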
diff --git a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md b/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md deleted file mode 100644 index f4f5492cf177a..0000000000000 --- a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "Docker Cheat Sheet" -date: 2017-08-10 -summary: Docker Compose cheat sheet to help you remember the common commands to control an Ozone cluster running on top of Docker. -weight: 4 ---- - - - -In the `compose` directory of the ozone distribution there are multiple pseudo-cluster setup which -can be used to run Ozone in different way (for example: secure cluster, with tracing enabled, -with prometheus etc.). - -If the usage is not document in a specific directory the default usage is the following: - -```bash -cd compose/ozone -docker-compose up -d -``` - -The data of the container is ephemeral and deleted together with the docker volumes. -```bash -docker-compose down -``` - -## Useful Docker & Ozone Commands - -If you make any modifications to ozone, the simplest way to test it is to run freon and unit tests. - -Here are the instructions to run freon in a docker-based cluster. - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} - -This will open a bash shell on the data node container. -Now we can execute freon for load generation. - -{{< highlight bash >}} -ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10 -{{< /highlight >}} - -Here is a set of helpful commands for working with docker for ozone. -To check the status of the components: - -{{< highlight bash >}} -docker-compose ps -{{< /highlight >}} - -To get logs from a specific node/service: - -{{< highlight bash >}} -docker-compose logs scm -{{< /highlight >}} - - -As the WebUI ports are forwarded to the external machine, you can check the web UI: - -* For the Storage Container Manager: http://localhost:9876 -* For the Ozone Manager: http://localhost:9874 -* For the Datanode: check the port with `docker ps` (as there could be multiple data nodes, ports are mapped to the ephemeral port range) - -You can start multiple data nodes with: - -{{< highlight bash >}} -docker-compose scale datanode=3 -{{< /highlight >}} - -You can test the commands from the [Ozone CLI]({{< ref "shell/_index.md" >}}) after opening a new bash shell in one of the containers: - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} diff --git a/hadoop-hdds/docs/content/beyond/RunningWithHDFS.md b/hadoop-hdds/docs/content/beyond/RunningWithHDFS.md deleted file mode 100644 index 154be5332bf4f..0000000000000 --- a/hadoop-hdds/docs/content/beyond/RunningWithHDFS.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Running concurrently with HDFS -linktitle: Runing with HDFS -weight: 1 -summary: Ozone is designed to run concurrently with HDFS. This page explains how to deploy Ozone in a exisiting HDFS cluster. ---- - - -Ozone is designed to work with HDFS. So it is easy to deploy ozone in an -existing HDFS cluster. - -The container manager part of Ozone can run inside DataNodes as a pluggable module -or as a standalone component. This document describe how can it be started as -a HDFS datanode plugin. - -To activate ozone you should define the service plugin implementation class. 
- - - -{{< highlight xml >}} - - dfs.datanode.plugins - org.apache.hadoop.ozone.HddsDatanodeService - -{{< /highlight >}} - -You also need to add the ozone-datanode-plugin jar file to the classpath: - -{{< highlight bash >}} -export HADOOP_CLASSPATH=/opt/ozone/share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin.jar -{{< /highlight >}} - - - -To start ozone with HDFS you should start the the following components: - - 1. HDFS Namenode (from Hadoop distribution) - 2. HDFS Datanode (from the Hadoop distribution with the plugin on the - classpath from the Ozone distribution) - 3. Ozone Manager (from the Ozone distribution) - 4. Storage Container Manager (from the Ozone distribution) - -Please check the log of the datanode whether the HDDS/Ozone plugin is started or -not. Log of datanode should contain something like this: - -``` -2018-09-17 16:19:24 INFO HddsDatanodeService:158 - Started plug-in org.apache.hadoop.ozone.web.OzoneHddsDatanodeService@6f94fb9d -``` - - diff --git a/hadoop-hdds/docs/content/beyond/_index.md b/hadoop-hdds/docs/content/beyond/_index.md deleted file mode 100644 index 2a29a5810aabe..0000000000000 --- a/hadoop-hdds/docs/content/beyond/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Beyond Basics" -date: "2017-10-10" -menu: main -weight: 7 - ---- - - -{{}} - Beyond Basics pages go into custom configurations of Ozone, including how - to run Ozone concurrently with an existing HDFS cluster. These pages also - take deep into how to run profilers and leverage tracing support built into - Ozone. -{{}} diff --git a/hadoop-hdds/docs/content/concept/ContainerMetadata.png b/hadoop-hdds/docs/content/concept/ContainerMetadata.png deleted file mode 100644 index 48bd1c43c0361..0000000000000 Binary files a/hadoop-hdds/docs/content/concept/ContainerMetadata.png and /dev/null differ diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md deleted file mode 100644 index ea63fe46b146e..0000000000000 --- a/hadoop-hdds/docs/content/concept/Datanodes.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "Datanodes" -date: "2017-09-14" -weight: 4 -summary: Ozone supports Amazon's Simple Storage Service (S3) protocol. In fact, You can use S3 clients and S3 SDK based applications without any modifications with Ozone. ---- - - -Datanodes are the worker bees of Ozone. All data is stored on data nodes. -Clients write data in terms of blocks. Datanode aggregates these blocks into -a storage container. A storage container is the data streams and metadata -about the blocks written by the clients. - -## Storage Containers - -![FunctionalOzone](ContainerMetadata.png) - -A storage container is a self-contained super block. It has a list of Ozone -blocks that reside inside it, as well as on-disk files which contain the -actual data streams. This is the default Storage container format. From -Ozone's perspective, container is a protocol spec, actual storage layouts -does not matter. In other words, it is trivial to extend or bring new -container layouts. Hence this should be treated as a reference implementation -of containers under Ozone. - -## Understanding Ozone Blocks and Containers - -When a client wants to read a key from Ozone, the client sends the name of -the key to the Ozone Manager. Ozone manager returns the list of Ozone blocks -that make up that key. - -An Ozone block contains the container ID and a local ID. The figure below -shows the logical layout out of Ozone block. 
- -![OzoneBlock](OzoneBlock.png) - -The container ID lets the clients discover the location of the container. The -authoritative information about where a container is located is with the -Storage Container Manager (SCM). In most cases, the container location will be -cached by Ozone Manager and will be returned along with the Ozone blocks. - - -Once the client is able to locate the contianer, that is, understand which -data nodes contain this container, the client will connect to the datanode -and read the data stream specified by _Container ID:Local ID_. In other -words, the local ID serves as index into the container which describes what -data stream we want to read from. - -### Discovering the Container Locations - -How does SCM know where the containers are located ? This is very similar to -what HDFS does; the data nodes regularly send container reports like block -reports. Container reports are far more concise than block reports. For -example, an Ozone deployment with a 196 TB data node will have around 40 -thousand containers. Compare that with HDFS block count of million and half -blocks that get reported. That is a 40x reduction in the block reports. - -This extra indirection helps tremendously with scaling Ozone. SCM has far -less block data to process and the name node is a different service are -critical to scaling Ozone. diff --git a/hadoop-hdds/docs/content/concept/FunctionalOzone.png b/hadoop-hdds/docs/content/concept/FunctionalOzone.png deleted file mode 100644 index 0bc75b5e1fdbb..0000000000000 Binary files a/hadoop-hdds/docs/content/concept/FunctionalOzone.png and /dev/null differ diff --git a/hadoop-hdds/docs/content/concept/Hdds.md b/hadoop-hdds/docs/content/concept/Hdds.md deleted file mode 100644 index ad17b54d01116..0000000000000 --- a/hadoop-hdds/docs/content/concept/Hdds.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: "Storage Container Manager" -date: "2017-09-14" -weight: 3 -summary: Storage Container Manager or SCM is the core metadata service of Ozone. SCM provides a distributed block layer for Ozone. ---- - - -Storage container manager provides multiple critical functions for the Ozone -cluster. SCM acts as the cluster manager, Certificate authority, Block -manager and the Replica manager. - -{{}} -SCM is in charge of creating an Ozone cluster. When an SCM is booted up via init command, SCM creates the cluster identity and root certificates needed for the SCM certificate authority. SCM manages the life cycle of a data node in the cluster. -{{}} - -{{}} -SCM's Ceritificate authority is in -charge of issuing identity certificates for each and every -service in the cluster. This certificate infrastructre makes -it easy to enable mTLS at network layer and also the block -token infrastructure depends on this certificate infrastructure. -{{}} - -{{}} -SCM is the block manager. SCM -allocates blocks and assigns them to data nodes. Clients -read and write these blocks directly. -{{}} - - -{{}} -SCM keeps track of all the block -replicas. If there is a loss of data node or a disk, SCM -detects it and instructs data nodes make copies of the -missing blocks to ensure high avialablity. -{{}} diff --git a/hadoop-hdds/docs/content/concept/Overview.md b/hadoop-hdds/docs/content/concept/Overview.md deleted file mode 100644 index 9e5746d84617d..0000000000000 --- a/hadoop-hdds/docs/content/concept/Overview.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Overview -date: "2017-10-10" -weight: 1 -summary: Ozone's overview and components that make up Ozone. 
---- - - - -Ozone is a redundant, distributed object store optimized for Big data -workloads. The primary design point of ozone is scalability, and it aims to -scale to billions of objects. - -Ozone separates namespace management and block space management; this helps -ozone to scale much better. The namespace is managed by a daemon called -[Ozone Manager ]({{< ref "OzoneManager.md" >}}) (OM), and block space is -managed by [Storage Container Manager] ({{< ref "Hdds.md" >}}) (SCM). - - -Ozone consists of volumes, buckets, and keys. -A volume is similar to a home directory in the ozone world. -Only an administrator can create it. - -Volumes are used to store buckets. -Once a volume is created users can create as many buckets as needed. -Ozone stores data as keys which live inside these buckets. - -Ozone namespace is composed of many storage volumes. -Storage volumes are also used as the basis for storage accounting. - -The block diagram shows the core components of Ozone. - -![Architecture diagram](ozoneBlockDiagram.png) - -The Ozone Manager is the name space manager, Storage Container Manager -manages the physical and data layer and Recon is the management interface for -Ozone. - - -## Different Perspectives - -![FunctionalOzone](FunctionalOzone.png) - -Any distributed system can be viewed from different perspectives. One way to -look at Ozone is to imagine it as Ozone Manager as a name space service built on - top of HDDS, a distributed block store. - -Another way to visualize Ozone is to look at the functional layers; we have a - metadata data management layer, composed of Ozone Manager and Storage - Container Manager. - -We have a data storage layer, which is basically the data nodes and they are - managed by SCM. - -The replication layer, provided by Ratis is used to replicate metadata (OM and SCM) -and also used for consistency when data is modified at the -data nodes. - -We have a management server called Recon, that talks to all other components -of Ozone and provides a unified management API and UX for Ozone. - -We have a protocol bus that allows Ozone to be extended via other -protocols. We currently only have S3 protocol support built via Protocol bus. -Protocol Bus provides a generic notion that you can implement new file system - or object store protocols that call into O3 Native protocol. - diff --git a/hadoop-hdds/docs/content/concept/OzoneBlock.png b/hadoop-hdds/docs/content/concept/OzoneBlock.png deleted file mode 100644 index 9583bd5ee78f1..0000000000000 Binary files a/hadoop-hdds/docs/content/concept/OzoneBlock.png and /dev/null differ diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.md b/hadoop-hdds/docs/content/concept/OzoneManager.md deleted file mode 100644 index 1ebdd4951d200..0000000000000 --- a/hadoop-hdds/docs/content/concept/OzoneManager.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: "Ozone Manager" -date: "2017-09-14" -weight: 2 -summary: Ozone Manager is the principal name space service of Ozone. OM manages the life cycle of volumes, buckets and Keys. ---- - - -Ozone Manager (OM) is the namespace manager for Ozone. - -This means that when you want to write some data, you ask Ozone -Manager for a block and Ozone Manager gives you a block and remembers that -information. When you want to read that file back, you need to find the -address of the block and Ozone Manager returns it you. - -Ozone Manager also allows users to organize keys under a volume and bucket. -Volumes and buckets are part of the namespace and managed by Ozone Manager. 
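For a concrete picture of that hierarchy, a minimal Ozone shell session might look like the sketch below; the paths are illustrative, and the full syntax is covered in the command line interface pages:

```bash
# Illustrative only: create a volume, a bucket inside it, then store and list a key.
ozone sh volume create /vol1
ozone sh bucket create /vol1/bucket1
ozone sh key put /vol1/bucket1/hello.txt ./hello.txt
ozone sh key list /vol1/bucket1
```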
- -Each ozone volume is the root of an independent namespace under OM. -This is very different from HDFS which provides a single rooted file system. - -Ozone's namespace is a collection of volumes or is a forest instead of a -single rooted tree as in HDFS. This property makes it easy to deploy multiple -OMs for scaling. - -## Ozone Manager Metadata - -OM maintains a list of volumes, buckets, and keys. -For each user, it maintains a list of volumes. -For each volume, the list of buckets and for each bucket the list of keys. - -Ozone Manager will use Apache Ratis(A Raft protocol implementation) to -replicate Ozone Manager state. This will ensure High Availability for Ozone. - - -## Ozone Manager and Storage Container Manager - -The relationship between Ozone Manager and Storage Container Manager is best -understood if we trace what happens during a key write and key read. - -### Key Write - -* To write a key to Ozone, a client tells Ozone manager that it would like to -write a key into a bucket that lives inside a specific volume. Once Ozone -Manager determines that you are allowed to write a key to the specified bucket, -OM needs to allocate a block for the client to write data. - -* To allocate a block, Ozone Manager sends a request to Storage Container -Manager (SCM); SCM is the manager of data nodes. SCM picks three data nodes -into which client can write data. SCM allocates the block and returns the -block ID to Ozone Manager. - -* Ozone manager records this block information in its metadata and returns the -block and a block token (a security permission to write data to the block) -to the client. - -* The client uses the block token to prove that it is allowed to write data to -the block and writes data to the data node. - -* Once the write is complete on the data node, the client will update the block -information on -Ozone manager. - - -### Key Reads - -* Key reads are simpler, the client requests the block list from the Ozone -Manager -* Ozone manager will return the block list and block tokens which -allows the client to read the data from data nodes. -* Client connects to the data node and presents the block token and reads -the data from the data node. diff --git a/hadoop-hdds/docs/content/concept/_index.md b/hadoop-hdds/docs/content/concept/_index.md deleted file mode 100644 index 8f0aeb07c965c..0000000000000 --- a/hadoop-hdds/docs/content/concept/_index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Concepts -date: "2017-10-10" -menu: main -weight: 6 - ---- - - - -{{}} - -Ozone's architectural elements are explained in the following pages. The -metadata layer, data layer, protocol bus, replication layer and Recon are -discussed in the following pages. These concepts are useful if you want to -understand how ozone works in depth. - -{{}} diff --git a/hadoop-hdds/docs/content/concept/ozoneBlockDiagram.png b/hadoop-hdds/docs/content/concept/ozoneBlockDiagram.png deleted file mode 100644 index 7fb738f68a9eb..0000000000000 Binary files a/hadoop-hdds/docs/content/concept/ozoneBlockDiagram.png and /dev/null differ diff --git a/hadoop-hdds/docs/content/design/decommissioning.md b/hadoop-hdds/docs/content/design/decommissioning.md deleted file mode 100644 index 8d620be515ed3..0000000000000 --- a/hadoop-hdds/docs/content/design/decommissioning.md +++ /dev/null @@ -1,624 +0,0 @@ - - ---- -title: Decommissioning in Ozone -summary: Formal process to shut down machines in a safe way after the required replications. 
-date: 2019-07-31 -jira: HDDS-1881 -status: current -author: Anu Engineer, Marton Elek, Stephen O'Donnell ---- - -# Abstract - -The goal of decommissioning is to turn off a selected set of machines without data loss. It may or may not require to move the existing replicas of the containers to other nodes. - -There are two main classes of the decommissioning: - - * __Maintenance mode__: where the node is expected to be back after a while. It may not require replication of containers if enough replicas are available from other nodes (as we expect to have the current replicas after the restart.) - - * __Decommissioning__: where the node won't be started again. All the data should be replicated according to the current replication rules. - -Goals: - - * Decommissioning can be canceled any time - * The progress of the decommissioning should be trackable - * The nodes under decommissioning / maintenance mode should not been used for new pipelines / containers - * The state of the datanodes should be persisted / replicated by the SCM (in HDFS the decommissioning info exclude/include lists are replicated manually by the admin). If datanode is marked for decommissioning this state be available after SCM and/or Datanode restarts. - * We need to support validations before decommissioing (but the violations can be ignored by the admin). - * The administrator should be notified when a node can be turned off. - * The maintenance mode can be time constrained: if the node marked for maintenance for 1 week and the node is not up after one week, the containers should be considered as lost (DEAD node) and should be replicated. - -# Introduction - -Ozone is a highly available file system that relies on commodity hardware. In other words, Ozone is designed to handle failures of these nodes all the time. - -The Storage Container Manager(SCM) is designed to monitor the node health and replicate blocks and containers as needed. - -At times, Operators of the cluster can help the SCM by giving it hints. When removing a datanode, the operator can provide a hint. That is, a planned failure of the node is coming up, and SCM can make sure it reaches a safe state to handle this planned failure. - -Some times, this failure is transient; that is, the operator is taking down this node temporarily. In that case, we can live with lower replica counts by being optimistic. - -Both of these operations, __Maintenance__, and __Decommissioning__ are similar from the Replication point of view. In both cases, and the user instructs us on how to handle an upcoming failure. - -Today, SCM (*Replication Manager* component inside SCM) understands only one form of failure handling. This paper extends Replica Manager failure modes to allow users to request which failure handling model to be adopted(Optimistic or Pessimistic). - -Based on physical realities, there are two responses to any perceived failure, to heal the system by taking corrective actions or ignore the failure since the actions in the future will heal the system automatically. - -## User Experiences (Decommissioning vs Maintenance mode) - -From the user's point of view, there are two kinds of planned failures that the user would like to communicate to Ozone. - -The first kind is when a 'real' failure is going to happen in the future. This 'real' failure is the act of decommissioning. We denote this as "decommission" throughout this paper. The response that the user wants is SCM/Ozone to make replicas to deal with the planned failure. 
- -The second kind is when the failure is 'transient.' The user knows that this failure is temporary and cluster in most cases can safely ignore this issue. However, if the transient failures are going to cause a failure of availability; then the user would like the Ozone to take appropriate actions to address it. An example of this case, is if the user put 3 data nodes into maintenance mode and switched them off. - -The transient failure can violate the availability guarantees of Ozone; Since the user is telling us not to take corrective actions. Many times, the user does not understand the impact on availability while asking Ozone to ignore the failure. - -So this paper proposes the following definitions for Decommission and Maintenance of data nodes. - -__Decommission__ *of a data node is deemed to be complete when SCM/Ozone completes the replica of all containers on decommissioned data node to other data nodes.That is, the expected count matches the healthy count of containers in the cluster*. - -__Maintenance mode__ of a data node is complete if Ozone can guarantee at *least one copy of every container* is available in other healthy data nodes. - -## Examples - -Here are some illustrative examples: - -1. Let us say we have a container, which has only one copy and resides on Machine A. If the user wants to put machine A into maintenance mode; Ozone will make a replica before entering the maintenance mode. - -2. Suppose a container has two copies, and the user wants to put Machine A to maintenance mode. In this case; the Ozone understands that availability of the container is not affected and hence can decide to forgo replication. - -3. Suppose a container has two copies, and the user wants to put Machine A into maintenance mode. However, the user wants to put the machine into maintenance mode for one month. As the period of maintenance mode increases, the probability of data loss increases; hence, Ozone might choose to make a replica of the container even if we are entering maintenance mode. - -4. The semantics of decommissioning means that as long as we can find copies of containers in other machines, we can technically get away with calling decommission complete. Hence this clarification node; in the ordinary course of action; each decommission will create a replication flow for each container we have; however, it is possible to complete a decommission of a data node, even if we get a failure of the data node being decommissioned. As long as we can find the other datanodes to replicate from and get the number of replicas needed backup to expected count we are good. - -5. Let us say we have a copy of a container replica on Machine A, B, and C. It is possible to decommission all three machines at the same time, as decommissioning is just a status indicator of the data node and until we finish the decommissioning process. - - -The user-visible features for both of these are very similar: - -Both Decommission and Maintenance mode can be canceled any time before the operation is marked as completed by SCM. - -Decommissioned nodes, if and when added back, shall be treated as new data nodes; if they have blocks or containers on them, they can be used to reconstruct data. - - -## Maintenance mode in HDFS - -HDFS supports decommissioning and maintenance mode similar to Ozone. This is a quick description of the HDFS approach. - -The usage of HDFS maintenance mode: - - * First, you set a minimum replica count on the cluster, which can be zero, but defaults to 1. 
- * Then you can set a number of nodes into maintenance, with an expiry time or have them remain in maintenance forever, until they are manually removed. Nodes are put into maintenance in much the same way as nodes are decommissioned. - * When a set of nodes go into maintenance, all blocks hosted on them are scanned and if the node going into maintenance would cause the number of replicas to fall below the minimum replica count, the relevant nodes go into a decommissioning like state while new replicas are made for the blocks. - * Once the node goes into maintenance, it can be stopped etc and HDFS will not be concerned about the under-replicated state of the blocks. - * When the expiry time passes, the node is put back to normal state (if it is online and heartbeating) or marked as dead, at which time new replicas will start to be made. - -This is very similar to decommissioning, and the code to track maintenance mode and ensure the blocks are replicated etc, is effectively the same code as with decommissioning. The one area that differs is probably in the replication monitor as it must understand that the node is expected to be offline. - -The ideal way to use maintenance mode, is when you know there are a set of nodes you can stop without having to do any replications. In HDFS, the rack awareness states that all blocks should be on two racks, so that means a rack can be put into maintenance safely. - -There is another feature in HDFS called "upgrade Domain" which allows each datanode to be assigned a group. By default there should be at least 3 groups (domains) and then each of the 3 replicas will be stored on different group, allowing one full group to be put into maintenance at once. That is not yet supported in CDH, but is something we are targeting for CDPD I believe. - -One other difference with maintenance mode and decommissioning, is that you must have some sort of monitor thread checking for when maintenance is scheduled to end. HDFS solves this by having a class called the DatanodeAdminManager, and it tracks all nodes transitioning state, the under-replicated block count on them etc. - - -# Implementation - - -## Datanode state machine - -`NodeStateManager` maintains the state of the connected datanodes. The possible states: - - state | description - ---------------------|------------ - HEALTHY | The node is up and running. - STALE | Some heartbeats were missing for an already missing nodes. - DEAD | The stale node has not been recovered. - ENTERING_MAINTENANCE | The in-progress state, scheduling is disabled but the node can't not been turned off due to in-progress replication. - IN_MAINTENANCE | Node can be turned off but we expecteed to get it back and have all the replicas. - DECOMMISSIONING | The in-progress state, scheduling is disabled, all the containers should be replicated to other nodes. - DECOMMISSIONED | The node can be turned off, all the containers are replicated to other machine - - - -## High level algorithm - -The Algorithm is pretty simple from the Decommission or Maintenance point of view; - - 1. Mark a data node as DECOMMISSIONING or ENTERING_MAINTENANCE. This implies that node is NOT healthy anymore; we assume the use of a single flag and law of excluded middle. - - 2. Pipelines should be shut down and wait for confirmation that all pipelines are shutdown. So no new I/O or container creation can happen on a Datanode that is part of decomm/maint. - - 3. 
Once the Node has been marked as DECOMMISSIONING or ENTERING_MAINTENANCE; the Node will generate a list of containers that need replication. This list is generated by the Replica Count decisions for each container; the Replica Count will be computed by Replica Manager; - - 4. Replica Manager will check the stop condition for each node. The following should be true for all the containers to go from DECOMMISSIONING to DECOMMISSIONED or from ENTERING_MAINTENANCE to IN_MAINTENANCE. - - * Container is closed. - * We have at least one HEALTHY copy at all times. - * For entering DECOMMISSIONED mode `maintenance + healthy` must equal to `expectedeCount` - - 5. We will update the node state to DECOMMISSIONED or IN_MAINTENANCE reached state. - -_Replica count_ is a calculated number which represents the number of _missing_ replicas. The number can be negative in case of an over-replicated container. - -## Calculation of the _Replica count_ (required replicas) - -### Counters / Variables - -We have 7 different datanode state, but some of them can be combined. At high level we can group the existing state to three groups: - - * healthy state (when the container is available) - * maintenance (including IN_MAINTENANCE and ENTERING_MAINTENANCE) - * all the others. - -To calculate the required steps (required replication + stop condition) we need counter about the first two categories. - -Each counters should be calculated per container bases. - - Node state | Variable (# of containers) | - --------------------------------------|---------------------------------| - HEALTHY | `healthy` | - STALE + DEAD + DECOMMISSIONED | | - DECOMMISSIONING | | - ENTERING_MAINTENANCE + IN_MAINTENANCE | `maintenance` | - - -### The current replication model - -The current replication model in SCM/Ozone is very simplistic. We compute the replication count or the number of replications that we need to do as: - -``` -Replica count = expectedCount - currentCount -``` - -In case the _Replica count_ is positive, it means that we need to make more replicas. If the number is negative, it means that we are over replicated and we need to remove some replicas of this container. If the Replica count for a container is zero; it means that we have the expected number of containers in the cluster. - -To support idempontent placement strategies we should substract the in-fligt replications from the result: If there are one in-flight replication process and two replicas we won't start a new replication command unless the original command is timed out. The timeout is configured with `hdds.scm.replication.event.timeout` and the default value is 10 minutes. - -More preciously the current algorithm is the following: - -```java -replicaCount = expectedCount - healthy; - -if (replicaCount - inflight copies + inflight deletes) > 0 { - // container is over replicated -}.else if (replicaCount - inflight copies + inflight deletes) <0 { - // handle under replicated containers -} -``` - -The handling of inflight copies and deletes are independent from the decommissioning problem therefore here we focus only on the core mode: - -``` -replicaCount = expectedCount - healthy; -``` - -### The proposed solution - -To support the notion that a user can provide hints to the replication model, we propose to add two variables to the current model. - -In the new model, we propose to break the `currentCount` into the two separate groups. That is _Healthy nodes_ and _Maintenance nodes_. The new model replaces the currentCount with these two separate counts. 
The following function captures the code that drives the logic of computing Replica counts in the new model. The later section discusses the input and output of this model very extensively. - -```java -/** - * Calculate the number of the missing replicas. - * - * @return the number of the missing replicas. - If it's less than zero, the container is over replicated. - */ -int getReplicationCount(int expectedCount, int healthy, int maintenance) { - - //for over replication, count only with the healthy replicas - if (expectedCount <= healthy) { - return expectedCount - healthy; - } - - replicaCount = expectedCount - (healthy + maintenance); - - //at least one HEALTHY replicas should be guaranteed! - if (replicaCount == 0 && healthy < 1) { - replicaCount ++; - } - - //over replication is already handled. Always return with - // positive as over replication is already handled - return Math.max(0, replicaCount); -} - -``` - -To understand the reasing behind the two special `if` condition check the examples below. - -We also need to specify two end condition when the DECOMMISSIONING node can be moved to the DECOMMISSIONED state or the ENTERING_MAINTENANCE mode can be moved to the IN_MAINTENANCE state. - -The following conditions should be true for all the containers and all the containers on the specific node should be closed. - -**From DECOMMISSIONING to DECOMMISSIONED**: - - * There are at least one healthy replica - * We have three replicas (both helthy and maintenance) - -Which means that our stop condition can be formalized as: - -``` -(healthy >= 1) && (healthy + maintenance >= 3) -``` - -Both the numbers can be configurable: - - * 1 is the minimum number of healthy replicas (`decommissioning.minimum.healthy-replicas`) - * 3 is the minimum number of existing replicas (`decommissioning.minimum.replicas`) - -For example `decommissioning.minimum.healthy-replicas` can be set to two if administrator would like to survive an additional node failure during the maintenance period. - -**From ENTERING_MAINTENANCE to IN_MAINTENANCE:** - - * There are at least one healthy replicas - -This is the weaker version of the previous condition: - -``` -(healthy >= 1) -``` - -### Examples (normal cases) - -In this section we show example use cases together with the output of the proposed algorithm - -#### All healthy - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - HEALTHY | HEALTHY | HEALTHY| 3 | 3 | 0 | 0 - -The container C1 exists on machines A, B , and C. All the container reports tell us that the container is healthy. Running the above algorithm, we get: - -`expected - healthy + maint. = 3 - (3 + 0) = 0` - -It means, _"we don’t need no replication"._ - -#### One failure - - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - HEALTHY | HEALTHY | DEAD | 3 | 2 | 0 | 1 - - -The machine C has failed, and as a result, the healthy count has gone down from `3` to `2`. This means that we need to start one replication flow. - -`ReplicaCount = expected - healthy + maint. = 3 - (2 + 0) = 1.` - -This means that the new model will handle failure cases just like the current model. - -#### One decommissioning - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - HEALTHY | HEALTHY | DECOMM | 3 | 2 | 0 | 1 - - -In this case, machine C is being decommissioned. 
Therefore the healthy count has gone down to `2` , and decommission count is `1`. Since the - -```ReplicaCount = expected - healthy + maint`. we have `1 = 3 - (2 + 0)```, - -this gives us the decommission count implicitly. The trick here is to realize that incrementing decommission automatically causes a decrement in the healthy count, which allows us not to have _decommission_ in the equation explicitly. - -**Stop condition**: Not that if this containers is the only one on node C, node C can be moved to the DECOMMISSIONED state. - -#### Failure + decommissioning - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - HEALTHY | DEAD | DECOMM | 3 | 1 | 0 | 2 - - -Here is a case where we have a failure of a data node and a decommission of another data node. In this case, the container C1 needs two replica flows to heal itself. The equation is the same and we get - -`ReplicaCount(2) = ExpectecCount(3) - healthy(1)` - -The maintenance is still zero so ignored in this equation. - -#### 1 failure + 2 decommissioning - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - HEALTHY | DECOMM | DECOMM | 3 | 0 | 0 | 3 - -In this case, we have one failed data node and two data nodes being decommissioned. We need to get three replica flows in the system. This is achieved by: - -``` -ReplicaCount(3) = ExpectedCount(3) - (healthy(0) + maintenance(0)) -``` - -#### Maintenance mode - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - HEALTHY | DEAD | MAINT | 3 | 2 | 1 | 0 - -This represents the normal maintenance mode, where a single machine is marked as in maintenance mode. This means the following: - -``` -ReplicaCount(0) = ExpectedCount(3) - (healthy(2) + maintenance(1) -``` - -There are no replica flows since the user has asked us to move a single node into maintenance mode, and asked us explicitly not to worry about the single missing node. - -**Stop condition**: Not that if this containers is the only one on node C, node C can be moved to the IN_MAINTENANCE state. - -#### Maintenance + decommissioning - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - HEALTHY | DECOMM | MAINT | 3 | 1 | 1 | 1 - -*This is a fascinating case*; We have one good node; one decommissioned node and one node in maintenance mode. The expected result is that the replica manager will launch one replication flow to compensate for the node that is being decommissioned, and we also expect that there will be no replication for the node in maintenance mode. - -``` -Replica Count (1) = expectedCount(3) - (healthy(1) + maintenance(1)) -``` -So as expected we have one replication flow in the system. - -**Stop condition**: Not that if this containers is the only one in the system: - - * node C can be moved to the IN_MAINTENANCE state - * node B can not be decommissioned (we need the three replicas first) - -#### Decommissioning all the replicas - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - DECOMM | DECOMM | DECOMM | 3 | 0 | 0 | 3 - -In this case, we deal with all the data nodes being decommissioned. 
The number of healthy replicas for this container is 0, and hence: - -``` -replicaCount (3) = expectedCount (3)- (healthy(0) + maintenance(0)). -``` - -This provides us with all 3 independent replica flows in the system. - -#### Decommissioning the one remaining replicas - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - DEAD | DEAD | DECOMM | 3 | 0 | 0 | 3 - -We have two failed nodes and one node in Decomm. It is the opposite of case Line 5, where we have one failed node and 2 nodes in Decomm. The expected results are the same, we get 3 flows. - -#### Total failure - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - DEAD | DEAD | DEAD | 3 | 0 | 0 | 3 - -This is really an error condition. We have lost all 3 data nodes. The Replica Manager will compute that we need to rebuild 3 replicas, but we might not have a source to rebuild from. - -### Last replica is on ENTERING_MAINTENANCE - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - DEAD | MAINT | DEAD | 3 | 0 | 1 | 2 - -Is also an interesting case; we have lost 2 data nodes; and one node is being marked as Maint. Since we have 2 failed nodes, we need 2 replica flows in the system. However, the maintenance mode cannot be entered, since we will lose lone replica if we do that. - -### All maintenance - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|--------|--------|---------|---------|---------- - MAINT | MAINT | MAINT | 3 | 0 | 3 | *1* - -This is also a very special case; this is the case where the user is telling us to ignore the peril for all 3 replicas being offline. This means that the system will not be able to get to that container and would lead to potential I/O errors. Ozone will strive to avoid that case; this means that Ozone will hit the “if condition” and discover that we our ReplicCount is 0; since the user asked for it; but we are also going to lose all Replicas. At this point of time, we make a conscious decision to replicate one copy instead of obeying the user command and get to the situation where I/O can fail. - -**This brings us back to the semantics of Maintenance mode in Ozone**. If going into maintenance mode will not lead to a potential I/O failure, we will enter into the maintenance mode; Otherwise, we will replicate and enter into the maintenance mode after the replication is done. This is just the core replication algorithm, not the complete Decommission or Maintenance mode algorithms, just how the replica manager would behave. Once we define the behavior of Replica Manager, rest of the algorithm is easy to construct. - -Note: this is the case why we need the seconf `if` in the model (numbers in the brackets shows the actual value): - -``` - replicaCount(0) = expectedCount(3) - ( healthy(0) + maintenance(0) ); - - - //at least one HEALTHY replicas should be guaranteed! - if (replicaCount(0) == 0 && healthy(0) < 1) { - replicaCount ++; - } -``` - -### Over replication - -For over-replicated containers Ozone prefers to keep the replicas on the healthy nodes. We delete containers only if we have enough replicas on *healthy* nodes. 
- -``` -int getReplicationCount(int expectedCount, int healthy, int maintenance) { - - //for over replication, count only with the healthy replicas - if (expectedCount <= healthy) { - return expectedCount - healthy; - } - - replicaCount = ... //calculate missing replicas - - //over replication is already handled. Always return with - // positive as over replication is already handled - return Math.max(0, replicaCount); -} -``` - -Please note that we always assume that the the in-flight deletion are applied and the container is already deleted. - -There is a very rare case where the in-flight deletion is timed out (and as a result replication manager would assume the container is not deleted) BUT in the mean-time the container finally deleted. It can be survivied with including the creation timestamp in the ContainerDeleteCommand. - -### Over replication examples - -#### 4 replicas - - - Node A | Node B | Node C | Node D | expctd | healthy | mainten | repCount - --------|---------|---------|---------|--------|---------|---------|---------- - HEALTHY | HEALTHY | HEALTHY | HEALTHY | 3 | 4 | 0 | 0 - -This is an easy case as we have too many replicas we can safely remove on. - -``` -if (expectedCount <= healthy) { - return expectedCount - healthy -} -``` - -#### over replicated with IN_MAINTENANCE - - - Node A | Node B | Node C | Node D | expctd | healthy | mainten | repCount - --------|---------|---------|---------|--------|---------|---------|---------- - HEALTHY | HEALTHY | HEALTHY | MAINT | 3 | 3 | 1 | 0 - - -In this case we will delete the forth replica only after node D is restored and healthy again. (expectedCount is not less than healthy). As the `expectedCount (3) <= healthy (3)` the replicaCount will be calculated as `0`. - -#### over replicated with IN_MAINTENANCE - - - Node A | Node B | Node C | Node D | expctd | healthy | mainten | repCount - --------|---------|---------|---------|--------|---------|---------|---------- - HEALTHY | HEALTHY | MAINT | MAINT | 3 | 2 | 2 | 0 - -Here we are not over-repliacated as we don't have any healthy nodes. We will calculate the under-replication number as defined in the previous section: - -``` -replicaCount(-1) = expectedCount(3) - ( healthy(2) + maintenance(2) ); -``` - -The main algorithm would return with `replicaCount = -1` but as we return `Math.max(0,replicaCount)` the real response will be 0. Waiting for healthy nodes. - -### Handling in-flight replications - -In-flight replication requests and deletes are handled by the Replica Manager and the problem is orthogonal to the replication problem, but this section shows that the proposed model is not conflicted with the existing approach. - -Let's say we have an under-replicated container and we already selected a new datanode to copy a new replica to that specific node. - - - Node A | Node B | Node C | expctd | healthy | mainten | repCount - --------|---------|---------|--------|---------|---------|---------- - HEALTHY | HEALTHY | (copy) | 3 | 2 | 0 | 1 - - -Here the Replication Manager detects that one replica is missing but the real copy shouldn't been requested as it's alread inprogress. ReplicaManager must not select a new datanode based on the ContainerPlacementPolicy implementation as the policy may or may not be idempotent. - -For example if the placement policy would select a datanode randomly with each loop we would select a new datanode to replicate to. - -To avoid such a situation Replica Manager maintains a list of the in-flight copies (in-memory) on the SCM side. 
In this list we have all the sent replication requests but they are removed after a given amount of time (10 minutes) by default. - -With calculating the in-flight copy as a possible replication the Replication Manger doesn't need to request new replication. - -When a datanode is marked to be decommissioned there could be any in-flight replication copy process in that time. - - * At datanode we should stop all of the in-flight copy (datanodes should be notified about the DECOMMISSIONING/IN_MAINTENANCE state) - * We never ask any non-healthy nodes to replicate containers. - * In SCM, we don't need to do any special action - * In `ReplicationManager` we already have a map about the inflight replications (`Map>`). - * During a normal replication the number of in-flight replications are counted as real replication (2 real replicas + 1 inflight replication = replica count 3). During this calculation we need to check the current state of the datanodes and ignore the inflight replication if they are assigned to a node which is in decommissioning state. (Or we should update the inflight map, in case of node state change) - -### In-flight examples - -#### Maintenance + inflight - - Node A | Node B | Node C | expctd | healthy | mainten | repCount | copy | - --------|---------|---------|--------|---------|---------|----------|------| - HEALTHY | MAINT | copying | 3 | 1 | 1 | 1 | 1 | - -Here one have one node ENTERING_MAINTENANCE state, and one replica is missing and already started to be replicated. We don't need to start a new copy and node B can be moved to the IN_MAINTENANCE mode. - -``` -Replica Count (1) = expectedCount(3) - (healthy(1) + maintenance(1)) -Containers to copy (0) = Replica Count (1) - inflight copies (1) -``` - -#### Maintenance + inflight - - Node A | Node B | Node C | expctd | healthy | mainten | repCount | copy | - --------|---------|---------|--------|---------|---------|----------|------| - DECOMM | copying | copying | 3 | 0 | 0 | 3 | 1 | - - -Node A can not be DECOMMISSIONED as we have no HEALTHY replicas at all. - - -## Statefulness - -SCM stores all the node state in-memory. After a restart on the SCM side the datanode state can be lost. - -**Ozone doesn't guarantee that decommissioning/maintenance mode state survives the SCM restarts!!!** - - * If SCM restarts DECOMMISSIONED nodes will not report any more container reports and the nodes won't be registered. - * ENTERING_MAINTENANCE and DECOMMISSIONING nodes will became HEALTHY again and the decommissioning CLI command should be repeated. - * IN_MAINTENANCE nodes will become DEAD and all the containers will be replicated. - - *Ozone assumes that the maintenance mode is used short-term and SCM is not restarted during this specific period.* - -*Reasoning*: - -Neither of the node state nor the container state are persisted in SCM side. The decommissioned state can be stored on the SCM side (or on the SCM side and the datanode side) which can provide better user experience (and may be implemented). - -But to support maintenance mode after restart all the container information is required to be persisted (which is a too big architectural change). - -To make a replication decision replication manager needs the number of healthy replicas (they are reported via heartbeats) AND the number of containers on the node which is in maintenance mode. The later one is not available if the SCM is restarted as the container map exists only in the memory and the node which is turned off can't report any more container reports. 
Therefore the information about the existing containers on the node which is in the maintenance mode **can't be available'**. - -## Throttling - -SCM should avoid to request too many replication to live enough network bandwidth for the requests. - -Replication Manager can easily throttle the replication requests based on `inflightReplication` map, but this problem is independent from the handling of the decommissioning / maintenance mode because it should be solved for any kind of replication not just for this. - -## User interface - -The decommissioning and maintenance mode can be administered with a CLI interface. - -Required feature: - - * Set the state of a datanode (to DECOMMISSIONING or ENTERING_MAINTENANCE) - * Undo the decommissioning process - * check the current progress: - * This can be a table with the nodes, status of the nodes, number of containers, containers under replication and containers which doesn't much the stop condition yet (required replications) - * All the commands can support topology related filters (eg. display the nodes only for a specific rack or show the status of the nodes of s specific rack) - * Check current replication information of one specific container (required to debug why the decommissioning is stuck) - -## Checks before the decommissioning - -Decommissioning is requested via a new RPC call with the help of a new CLI tool. The server should check the current state of the cluster and deny the decommissioning if it's not possible. Possible violations: - - * Not enough space to store the new replicas. - * Not enough node to create all kind of pipelines - - In case of any violation, the request will fail, but any of theses rules can be turned off with a next request and the decommissioning can be forced. - -## Maintain progress - -We need to show the progress of the decommissioning process per node and cluster-wide. We already have the information about the under replicated containers, but we don't know the numbers of the containers before decommissioning. - -Instead of saving the original number of the required replications before (which is very fragile) we don't provide an absolute progress just the numbers of the remaining replication: - - -``` - Node | Status | # containers | in-progress replications| required replication - ----------------|------------------------|--------------|--------------------------|------------------------ - Node A | ENTERING_MAINTENANCE | 2837 | 12 | 402 - Node B | HEALTHY | 1239 | 0 | 0 - Node C | IN_MAINTENANCE | 2348 | 0 | 0 -``` - -`# containers` means the total number of the containers on the specific datanodes. To get the original number of the planned copies we can save the original 'container-to-node' map in memory and show some progress and provide more information for the users. diff --git a/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md b/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md deleted file mode 100644 index cc7569eb2cfa0..0000000000000 --- a/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: Ozone Enhancement Proposals -summary: Definition of the process to share new technical proposals with the Ozone community. -date: 2019-06-07 -jira: HDDS-1659 -status: accepted -author: Anu Enginner, Marton Elek ---- - - -## Problem statement - -Some of the biggers features requires well defined plans before the implementation. Until now it was managed by uploading PDF design docs to selected JIRA. 
There are multiple problems with the current practice. - - 1. There is no easy way to find existing up-to-date and outdated design docs. - 2. Design docs usually have better description of the problem that the user docs - 3. We need better tools to discuss the design docs in the development phase of the doc - -We propose to follow the same process what we have now, but instead of uploading a PDF to the JIRA, create a PR to merge the proposal document to the documentation project. - -## Non-goals - - * Modify the existing workflow or approval process - * Migrate existing documents - * Make it harder to create design docs (it should be easy to support the creation of proposals for any kind of tasks) - * Define how the design docs are handled/created *before* the publication (this proposal is about the publishing process) - -## Proposed solution - - * Open a dedicated Jira (`HDDS-*` but with specific component) - * Use standard name prefix in the jira (easy to filter on the mailing list) `[OEP] - * Create a PR to add the design doc to the current documentation - * The content of the design can be added to the documentation (Recommended) - * Or can be added as external reference - * The design doc (or the summary with the reference) will be merged to the design doc folder of `hadoop-hdds/docs/content/design` (will be part of the docs) - * Discuss it as before (lazy consesus, except if somebody calls for a real vote) - * Design docs can be updated according to the changes during the implementation - * Only the implemented design docs will be visible as part of the design docs - - -As a result all the design docs can be listed under the documentation page. - -A good design doc has the following properties: - - 1. Publicly available for anybody (Please try to avoid services which are available only with registration, eg: google docs) - 2. Archived for the future (Commit it to the source OR use apache jira or wiki) - 3. Editable later (Best format is markdown, RTF is also good. PDF has a limitation, it's very hard to reuse the text, or create an updated design doc) - 4. Well structured to make it easy to comment any part of the document (Markdown files which are part of the pull request can be commented in the PR line by line) - - -### Example 1: Design doc as a markdown file - -The easiest way to create a design doc is to create a new markdown file in a PR and merge it to `hadoop-hdds/docs/content/design`. - - 1. Publicly available: YES, it can be linked from Apache git or github - 2. Archived: YES, and it's also versioned. All the change history can be tracked. - 3. Editable later: YES, as it's just a simple text file - 4. Commentable: YES, comment can be added to each line. - -### Example 2: Design doc as a PDF - -A very common practice of today is to create design doc on google docs and upload it to the JIRA. - - 1. Publicy available: YES, anybody can download it from the Jira. - 2. Archived: YES, it's available from Apache infra. - 3. Editable: NO, It's harder to reuse the text to import to the docs or create a new design doc. - 4. Commentable: PARTIAL, Not as easy as a text file or the original google docs, but a good structure with numbered section may help - - -### The format - -While the first version (markdown files) are the most powerful, the second version (the existing practice) is also acceptable. In this case we propose to create a PR with adding a reference page *without* the content but including the link. 
- -For example: - -```yaml ---- -title: Ozone Security Design -summary: A comprehensive description of the security flow between server and client components. -date: 2018-02-22 -jira: HDDS-4 -status: implemented -author: Sanjay Radia, Jitendra Pandey, Xiaoyu Yao, Anu Engineer - -## Summary - -Ozone security model is based on Kerberos and similar to the Hadoop security but some of the parts are improved: for example the SCM works as a Certificate Authority and PKI based solutions are wildely used. - -## Reference - -For more details please check the (uploaded design doc)[https://issues.apache.org/jira/secure/attachment/12911638/HadoopStorageLayerSecurity.pdf]. - -``` - -Obviously with the first approach the design doc itself can be included in this markdown file. - -## Migration - -It's not a hard requirement to migrate all the design doc. But process is always open: - - 1. To create reference pages for any of the old design docs - 2. To migrate any new design docs to markdown formats (by anybody not just by the author) - 3. To update any of the old design docs based on the current state of the code (We have versioning!) - -## Document template - -This the proposed template to document any proposal. It's recommended but not required the use exactly the some structure. Some proposal may require different structure, but we need the following information. - -1. Summary - -> Give a one sentence summary, like the jira title. It will be displayed on the documentation page. Should be enough to understand - -2. Status - -Defined in the markdown header. Proposed statuses: - - * `accepted`: (Use this as by default. If not accapted, won't be merged) - - * `implemented`: The discussed technical solution is implemented (maybe with some minor implementation difference) - - * `replaced`: Replaced by a new design doc - - * `outdated`: Code has been changed and design doc doesn't reflect any more the state of the current code. - - Note: the _accepted_ design docs won't be visible as part of the documentation or only under a dedicated section to clearly comminucate that it's not ready, yet. - -3. Problem statement (Motivation / Abstract) - -> What is the problem and how would you solve it? Think about an abstract of a paper: one paragraph overview. Why will the world better with this change? - -4. Non-goals - - > Very important to define what is outside of the scope of this proposal - -5. Technical Description (Architecture and implementation details) - - > Explain the problem in more details. How can it be reproduced? What is the current solution? What is the limitation of the current solution? - - > How the new proposed solution would solve the problem? Architectural design. - - > Implementation details. What should be changed in the code. Is it a huge change? Do we need to change wire protocol? Backward compatibility? - -6. Alternatives - - > What are the other alternatives you considered and why do yoy prefer the proposed solution The goal of this section is to help people understand why this is the best solution now, and also to prevent churn in the future when old alternatives are reconsidered. - -Note: In some cases 4/5 can be combined. For example if you have multiple proposals, the first version may include multiple solutions. At the end ot the discussion we can move the alternatives to 5. and explain why the community is decided to use the selected option. - -7. Plan - - > Planning to implement the feature. Estimated size of the work? Do we need feature branch? Any migration plan, dependency? 
If it's not a big new feature it can be one sentence or optional. - -8. References - -## Workflows form other projects - -There are similar process in other open source projects. This document and the template is inspired by the following projects: - - * [Apache Kafka Improvement Proposals](https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals) - * [Apache Spark Project Improvement Proposals](https://spark.apache.org/improvement-proposals.html) - * [Kubernetes Enhancement Proposals](https://github.com/kubernetes/enhancements/tree/master/keps) - -Short summary of the processes: - -__Kafka__ process: - - * Create wiki page - * Start discussion on mail thread - * Vote on mail thread - -__Spark__ process: - - * Create JIRA (dedicated label) - * Discuss on the jira page - * Vote on dev list - -*Kubernetes*: - - * Deditaced git repository - * KEPs are committed to the repo - * Well defined approval process managed by SIGs (KEPs are assigned to SIGs) - diff --git a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md b/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md deleted file mode 100644 index dd23e04941690..0000000000000 --- a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "GDPR in Ozone" -date: "2019-September-17" -weight: 5 -summary: GDPR in Ozone -icon: user ---- - - - -Enabling GDPR compliance in Ozone is very straight forward. During bucket -creation, you can specify `--enforcegdpr=true` or `-g=true` and this will -ensure the bucket is GDPR compliant. Thus, any key created under this bucket -will automatically be GDPR compliant. - -GDPR can only be enabled on a new bucket. For existing buckets, you would -have to create a new GDPR compliant bucket and copy data from old bucket into - new bucket to take advantage of GDPR. - -Example to create a GDPR compliant bucket: - -`ozone sh bucket create --enforcegdpr=true /hive/jan` - -`ozone sh bucket create -g=true /hive/jan` - -If you want to create an ordinary bucket then you can skip `--enforcegdpr` -and `-g` flags. \ No newline at end of file diff --git a/hadoop-hdds/docs/content/gdpr/_index.md b/hadoop-hdds/docs/content/gdpr/_index.md deleted file mode 100644 index 9888369023bf5..0000000000000 --- a/hadoop-hdds/docs/content/gdpr/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: GDPR -name: GDPR -identifier: gdpr -menu: main -weight: 5 ---- - - -{{}} - The General Data Protection Regulation (GDPR) is a law that governs how personal data should be handled. This is an European Union law, but due to the nature of software oftentimes spills into other geographies. - Ozone supports GDPR's Right to Erasure(Right to be Forgotten). -{{}} - -
- -Once you create a GDPR compliant bucket, any key created in that bucket will -automatically by GDPR compliant. - - diff --git a/hadoop-hdds/docs/content/interface/JavaApi.md b/hadoop-hdds/docs/content/interface/JavaApi.md deleted file mode 100644 index bb18068f40006..0000000000000 --- a/hadoop-hdds/docs/content/interface/JavaApi.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: "Java API" -date: "2017-09-14" -weight: 1 -summary: Ozone has a set of Native RPC based APIs. This is the lowest level API's on which all other protocols are built. This is the most performant and feature-full of all Ozone protocols. ---- - - -Ozone ships with its own client library that supports RPC. For generic use cases the S3 -compatible REST interface also can be used instead of the Ozone client. - - -## Creating an Ozone client -The Ozone client factory creates the ozone client. To get a RPC client we can call - -{{< highlight java >}} -OzoneClient ozClient = OzoneClientFactory.getRpcClient(); -{{< /highlight >}} - -If the user want to create a client based on the configuration, then they can -call. - -{{< highlight java >}} -OzoneClient ozClient = OzoneClientFactory.getClient(); -{{< /highlight >}} - -and an appropriate client based on configuration will be returned. - -## Writing data using Ozone Client - -The hierarchy of data inside ozone is a volume, bucket and a key. A volume -is a collection of buckets. A bucket is a collection of keys. To write data -to the ozone, you need a volume, bucket and a key. - -### Creating a Volume - -Once we have a client, we need to get a reference to the ObjectStore. This -is done via - -{{< highlight java >}} -ObjectStore objectStore = ozClient.getObjectStore(); -{{< /highlight >}} - -An object store represents an active cluster against which the client is working. - -{{< highlight java >}} -// Let us create a volume to store our game assets. -// This uses default arguments for creating that volume. -objectStore.createVolume("assets"); - -// Let us verify that the volume got created. -OzoneVolume assets = objectStore.getVolume("assets"); -{{< /highlight >}} - - -It is possible to pass an array of arguments to the createVolume by creating volume arguments. - -### Creating a Bucket - -Once you have a volume, you can create buckets inside the volume. - -{{< highlight java >}} -// Let us create a bucket called videos. -assets.createBucket("videos"); -OzoneBucket video = assets.getBucket("videos"); -{{< /highlight >}} - -At this point we have a usable volume and a bucket. Our volume is called _assets_ and bucket is called _videos_. - -Now we can create a Key. - -### Reading and Writing a Key - -With a bucket object the users can now read and write keys. The following code reads a video called intro.mp4 from the local disk and stores in the _video_ bucket that we just created. - -{{< highlight java >}} -// read data from the file, this is a user provided function. -byte [] videoData = readFile("intro.mp4"); - -// Create an output stream and write data. -OzoneOutputStream videoStream = video.createKey("intro.mp4", 1048576); -videoStream.write(videoData); - -// Close the stream when it is done. -videoStream.close(); - - -// We can use the same bucket to read the file that we just wrote, by creating an input Stream. -// Let us allocate a byte array to hold the video first. 
-byte[] data = new byte[(int)1048576]; -OzoneInputStream introStream = video.readKey("intro.mp4"); -// read intro.mp4 into the data buffer -introStream.read(data); -introStream.close(); -{{< /highlight >}} - - -Here is a complete example of the code that we just wrote. Please note the close functions being called in this program. - -{{< highlight java >}} -// Let us create a client -OzoneClient ozClient = OzoneClientFactory.getClient(); - -// Get a reference to the ObjectStore using the client -ObjectStore objectStore = ozClient.getObjectStore(); - -// Let us create a volume to store our game assets. -// This default arguments for creating that volume. -objectStore.createVolume("assets"); - -// Let us verify that the volume got created. -OzoneVolume assets = objectStore.getVolume("assets"); - -// Let us create a bucket called videos. -assets.createBucket("videos"); -OzoneBucket video = assets.getBucket("videos"); - -// read data from the file, this is assumed to be a user provided function. -byte [] videoData = readFile("intro.mp4"); - -// Create an output stream and write data. -OzoneOutputStream videoStream = video.createKey("intro.mp4", 1048576); -videoStream.write(videoData); - -// Close the stream when it is done. -videoStream.close(); - - -// We can use the same bucket to read the file that we just wrote, by creating an input Stream. -// Let us allocate a byte array to hold the video first. - -byte[] data = new byte[(int)1048576]; -OzoneInputStream introStream = video.readKey("intro.mp4"); -introStream.read(data); - -// Close the stream when it is done. -introStream.close(); - -// Close the client. -ozClient.close(); -{{< /highlight >}} diff --git a/hadoop-hdds/docs/content/interface/OzoneFS.md b/hadoop-hdds/docs/content/interface/OzoneFS.md deleted file mode 100644 index fcfef6dde3d6a..0000000000000 --- a/hadoop-hdds/docs/content/interface/OzoneFS.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: Ozone File System -date: 2017-09-14 -weight: 2 -summary: Hadoop Compatible file system allows any application that expects an HDFS like interface to work against Ozone with zero changes. Frameworks like Apache Spark, YARN and Hive work against Ozone without needing any change. ---- - - -The Hadoop compatible file system interface allows storage backends like Ozone -to be easily integrated into Hadoop eco-system. Ozone file system is an -Hadoop compatible file system. - -## Setting up the Ozone file system - -To create an ozone file system, we have to choose a bucket where the file system would live. This bucket will be used as the backend store for OzoneFileSystem. All the files and directories will be stored as keys in this bucket. - -Please run the following commands to create a volume and bucket, if you don't have them already. - -{{< highlight bash >}} -ozone sh volume create /volume -ozone sh bucket create /volume/bucket -{{< /highlight >}} - -Once this is created, please make sure that bucket exists via the _list volume_ or _list bucket_ commands. - -Please add the following entry to the core-site.xml. - -{{< highlight xml >}} - - fs.o3fs.impl - org.apache.hadoop.fs.ozone.OzoneFileSystem - - - fs.AbstractFileSystem.o3fs.impl - org.apache.hadoop.fs.ozone.OzFs - - - fs.defaultFS - o3fs://bucket.volume - -{{< /highlight >}} - -This will make this bucket to be the default file system for HDFS dfs commands and register the o3fs file system type. 
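As a quick sanity check (a minimal sketch, assuming the standard `hdfs getconf` tool is available on the client), you can confirm that the new default file system is actually picked up before running any jobs:

{{< highlight bash >}}
# Should print the URI configured above, e.g. o3fs://bucket.volume
hdfs getconf -confKey fs.defaultFS
{{< /highlight >}}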
- -You also need to add the ozone-filesystem.jar file to the classpath: - -{{< highlight bash >}} -export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/hadoop-ozone-filesystem-lib-current*.jar:$HADOOP_CLASSPATH -{{< /highlight >}} - -Once the default Filesystem has been setup, users can run commands like ls, put, mkdir, etc. -For example, - -{{< highlight bash >}} -hdfs dfs -ls / -{{< /highlight >}} - -or - -{{< highlight bash >}} -hdfs dfs -mkdir /users -{{< /highlight >}} - - -Or put command etc. In other words, all programs like Hive, Spark, and Distcp will work against this file system. -Please note that any keys created/deleted in the bucket using methods apart from OzoneFileSystem will show up as directories and files in the Ozone File System. - -Note: Bucket and volume names are not allowed to have a period in them. -Moreover, the filesystem URI can take a fully qualified form with the OM host and an optional port as a part of the path following the volume name. -For example, you can specify both host and port: - -{{< highlight bash>}} -hdfs dfs -ls o3fs://bucket.volume.om-host.example.com:5678/key -{{< /highlight >}} - -When the port number is not specified, it will be retrieved from config key `ozone.om.address` -if defined; or it will fall back to the default port `9862`. -For example, we have `ozone.om.address` configured as following in `ozone-site.xml`: - -{{< highlight xml >}} - - ozone.om.address - 0.0.0.0:6789 - -{{< /highlight >}} - -When we run command: - -{{< highlight bash>}} -hdfs dfs -ls o3fs://bucket.volume.om-host.example.com/key -{{< /highlight >}} - -The above command is essentially equivalent to: - -{{< highlight bash>}} -hdfs dfs -ls o3fs://bucket.volume.om-host.example.com:6789/key -{{< /highlight >}} - -Note: Only port number from the config is used in this case, -whereas the host name in the config `ozone.om.address` is ignored. - - -## Supporting older Hadoop version (Legacy jar, BasicOzoneFilesystem) - -There are two ozonefs files, both of them include all the dependencies: - - * share/ozone/lib/hadoop-ozone-filesystem-lib-current-VERSION.jar - * share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-VERSION.jar - -The first one contains all the required dependency to use ozonefs with a - compatible hadoop version (hadoop 3.2). - -The second one contains all the dependency in an internal, separated directory, - and a special class loader is used to load all the classes from the location. - -With this method the hadoop-ozone-filesystem-lib-legacy.jar can be used from - any older hadoop version (eg. hadoop 3.1, hadoop 2.7 or spark+hadoop 2.7) - -Similar to the dependency jar, there are two OzoneFileSystem implementation. - -For hadoop 3.0 and newer, you can use `org.apache.hadoop.fs.ozone.OzoneFileSystem` - which is a full implementation of the Hadoop compatible File System API. - -For Hadoop 2.x you should use the Basic version: `org.apache.hadoop.fs.ozone.BasicOzoneFileSystem`. - -This is the same implementation but doesn't include the features/dependencies which are added with - Hadoop 3.0. (eg. FS statistics, encryption zones). 
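For example, on a Hadoop 2.7 client the wiring could look roughly like the sketch below. The jar path is a placeholder for wherever the legacy jar is installed, and the `fs.o3fs.impl` override could equally be set in `core-site.xml` instead of being passed with `-D`.

{{< highlight bash >}}
# Put the self-contained legacy ozonefs jar on the classpath of the older client.
export HADOOP_CLASSPATH=/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-*.jar:$HADOOP_CLASSPATH

# Point o3fs at the Basic implementation, which avoids the Hadoop 3.x-only APIs.
hdfs dfs -D fs.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzoneFileSystem \
  -ls o3fs://bucket.volume/
{{< /highlight >}}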
- -### Summary - -The following table summarize which jar files and implementation should be used: - -Hadoop version | Required jar | OzoneFileSystem implementation ----------------|-------------------------|---------------------------------------------------- -3.2 | filesystem-lib-current | org.apache.hadoop.fs.ozone.OzoneFileSystem -3.1 | filesystem-lib-legacy | org.apache.hadoop.fs.ozone.OzoneFileSystem -2.9 | filesystem-lib-legacy | org.apache.hadoop.fs.ozone.BasicOzoneFileSystem -2.7 | filesystem-lib-legacy | org.apache.hadoop.fs.ozone.BasicOzoneFileSystem - With this method the hadoop-ozone-filesystem-lib-legacy.jar can be used from - any older hadoop version (eg. hadoop 2.7 or spark+hadoop 2.7) diff --git a/hadoop-hdds/docs/content/interface/S3.md b/hadoop-hdds/docs/content/interface/S3.md deleted file mode 100644 index 6a8e2d7c53b02..0000000000000 --- a/hadoop-hdds/docs/content/interface/S3.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: S3 Protocol -weight: 3 -summary: Ozone supports Amazon's Simple Storage Service (S3) protocol. In fact, You can use S3 clients and S3 SDK based applications without any modifications with Ozone. ---- - - - - -Ozone provides S3 compatible REST interface to use the object store data with any S3 compatible tools. - -## Getting started - -S3 Gateway is a separated component which provides the S3 compatible APIs. It should be started additional to the regular Ozone components. - -You can start a docker based cluster, including the S3 gateway from the release package. - -Go to the `compose/ozones3` directory, and start the server: - -```bash -docker-compose up -d -``` - -You can access the S3 gateway at `http://localhost:9878` - -## URL Schema - -Ozone S3 gateway supports both the virtual-host-style URL s3 bucket addresses (eg. http://bucketname.host:9878) and the path-style addresses (eg. http://host:9878/bucketname) - -By default it uses the path-style addressing. To use virtual host style URLs set your main domain name in your `ozone-site.xml`: - -```xml - - ozone.s3g.domain.name - s3g.internal - -``` - -## Bucket browser - -Buckets could be browsed from the browser by adding `?browser=true` to the bucket URL. - -For example the content of the 'testbucket' could be checked from the browser using the URL http://localhost:9878/testbucket?browser=true - - -## Implemented REST endpoints - -Operations on S3Gateway service: - -Endpoint | Status | -------------|-------------| -GET service | implemented | - -Operations on Bucket: - -Endpoint | Status | Notes -------------------------------------|-------------|--------------- -GET Bucket (List Objects) Version 2 | implemented | -HEAD Bucket | implemented | -DELETE Bucket | implemented | -PUT Bucket (Create bucket) | implemented | -Delete Multiple Objects (POST) | implemented | - -Operation on Objects: - -Endpoint | Status | Notes -------------------------------------|-----------------|--------------- -PUT Object | implemented | -GET Object | implemented | -Multipart Upload | implemented | Except the listing of the current MultiPartUploads. -DELETE Object | implemented | -HEAD Object | implemented | - - -## Security - -If security is not enabled, you can *use* **any** AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY - -If security is enabled, you can get the key and the secret with the `ozone s3 getsecret` command (*kerberos based authentication is required). 
- -```bash -/etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM -ozone s3 getsecret -awsAccessKey=testuser/scm@EXAMPLE.COM -awsSecret=c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999 - -``` - -Now, you can use the key and the secret to access the S3 endpoint: - -```bash -export AWS_ACCESS_KEY_ID=testuser/scm@EXAMPLE.COM -export AWS_SECRET_ACCESS_KEY=c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999 -aws s3api --endpoint http://localhost:9878 create-bucket --bucket bucket1 -``` - - -## S3 bucket name mapping to Ozone buckets - -**Note**: Ozone has a notion for 'volumes' which is missing from the S3 Rest endpoint. Under the hood S3 bucket names are mapped to Ozone 'volume/bucket' locations (depending on the given authentication information). - -To show the storage location of a S3 bucket, use the `ozone s3 path ` command. - -```bash -aws s3api --endpoint-url http://localhost:9878 create-bucket --bucket=bucket1 - -ozone s3 path bucket1 -Volume name for S3Bucket is : s3thisisakey -Ozone FileSystem Uri is : o3fs://bucket1.s3thisisakey -``` - -## Clients - -### AWS Cli - -`aws` CLI could be used by specifying the custom REST endpoint. - -```bash -aws s3api --endpoint http://localhost:9878 create-bucket --bucket buckettest -``` - -Or - -```bash -aws s3 ls --endpoint http://localhost:9878 s3://buckettest -``` - -### S3 Fuse driver (goofys) - -Goofys is a S3 FUSE driver. It could be used to mount any Ozone bucket as posix file system. - - -```bash -goofys --endpoint http://localhost:9878 bucket1 /mount/bucket1 -``` diff --git a/hadoop-hdds/docs/content/interface/_index.md b/hadoop-hdds/docs/content/interface/_index.md deleted file mode 100644 index 254864732fb84..0000000000000 --- a/hadoop-hdds/docs/content/interface/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Programming Interfaces" -menu: - main: - weight: 4 ---- - - -{{}} -Ozone is a multi-protocol file system. There are different protocols by which - users can access data on Ozone. -{{}} diff --git a/hadoop-hdds/docs/content/recipe/Prometheus.md b/hadoop-hdds/docs/content/recipe/Prometheus.md deleted file mode 100644 index 310d078567b17..0000000000000 --- a/hadoop-hdds/docs/content/recipe/Prometheus.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: Monitoring with Prometheus -summary: A Simple recipe to monitor Ozone using Prometheus -linktitle: Prometheus ---- - - -[Prometheus](https://prometheus.io/) is an open-source monitoring server developed under under the [Cloud Native Computing Foundation](https://www.cncf.io/). - -Ozone supports Prometheus out of the box. The servers start a prometheus -compatible metrics endpoint where all the available hadoop metrics are published in prometheus exporter format. - -## Prerequisites - - 1. [Install the and start]({{< ref "start/RunningViaDocker.md" >}}) an Ozone cluster. - 2. [Download](https://prometheus.io/download/#prometheus) the prometheus binary. - -## Monitoring with prometheus - -* To enable the Prometheus metrics endpoint you need to add a new configuration to the `ozone-site.xml` file. - - ```xml - - hdds.prometheus.endpoint.enabled - true - -``` - -_Note_: for Docker compose based pseudo cluster put the \ -`OZONE-SITE.XML_hdds.prometheus.endpoint.enabled=true` line to the `docker-config` file. 
- -* Restart the Ozone Manager and Storage Container Manager and check the prometheus endpoints: - - * http://scm:9874/prom - - * http://ozoneManager:9876/prom - -* Create a prometheus.yaml configuration with the previous endpoints: - -```yaml -global: - scrape_interval: 15s - -scrape_configs: - - job_name: ozone - metrics_path: /prom - static_configs: - - targets: - - "scm:9876" - - "ozoneManager:9874" -``` - -* Start with prometheus from the directory where you have the prometheus.yaml file: - -```bash -prometheus -``` - -* Check the active targets in the prometheus web-ui: - -http://localhost:9090/targets - -![Prometheus target page example](prometheus.png) - - -* Check any metrics on the prometheus web ui.\ -For example: - -http://localhost:9090/graph?g0.range_input=1h&g0.expr=om_metrics_num_key_allocate&g0.tab=1 - -![Prometheus metrics page example](prometheus-key-allocate.png) - -## Note - -The ozone distribution contains a ready-to-use, dockerized environment to try out ozone and prometheus. It can be found under `compose/ozoneperf` directory. - -```bash -cd compose/ozoneperf -docker-compose up -d -``` \ No newline at end of file diff --git a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md b/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md deleted file mode 100644 index 9f9d3478c9bca..0000000000000 --- a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: Spark in Kubernetes with OzoneFS -linktitle: Spark -summary: How to use Apache Spark with Ozone on K8s? ---- - - -This recipe shows how Ozone object store can be used from Spark using: - - - OzoneFS (Hadoop compatible file system) - - Hadoop 2.7 (included in the Spark distribution) - - Kubernetes Spark scheduler - - Local spark client - - -## Requirements - -Download latest Spark and Ozone distribution and extract them. This method is -tested with the `spark-2.4.0-bin-hadoop2.7` distribution. - -You also need the following: - - * A container repository to push and pull the spark+ozone images. (In this recipe we will use the dockerhub) - * A repo/name for the custom containers (in this recipe _myrepo/ozone-spark_) - * A dedicated namespace in kubernetes (we use _yournamespace_ in this recipe) - -## Create the docker image for drivers - -### Create the base Spark driver/executor image - -First of all create a docker image with the Spark image creator. -Execute the following from the Spark distribution - -```bash -./bin/docker-image-tool.sh -r myrepo -t 2.4.0 build -``` - -_Note_: if you use Minikube add the `-m` flag to use the docker daemon of the Minikube image: - -```bash -./bin/docker-image-tool.sh -m -r myrepo -t 2.4.0 build -``` - -`./bin/docker-image-tool.sh` is an official Spark tool to create container images and this step will create multiple Spark container images with the name _myrepo/spark_. The first container will be used as a base container in the following steps. - -### Customize the docker image - -Create a new directory for customizing the created docker image. - -Copy the `ozone-site.xml` from the cluster: - -```bash -kubectl cp om-0:/opt/hadoop/etc/hadoop/ozone-site.xml . -``` - -And create a custom `core-site.xml`. - -```xml - - - fs.o3fs.impl - org.apache.hadoop.fs.ozone.BasicOzoneFileSystem - - - fs.AbstractFileSystem.o3fs.impl - org.apache.hadoop.fs.ozone.OzFs - - -``` - -_Note_: You may also use `org.apache.hadoop.fs.ozone.OzoneFileSystem` without the `Basic` prefix. 
The `Basic` version doesn't support FS statistics and encryption zones but can work together with older hadoop versions. - -Copy the `ozonefs.jar` file from an ozone distribution (__use the legacy version!__) - -``` -kubectl cp om-0:/opt/hadoop/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-0.4.0-SNAPSHOT.jar . -``` - - -Create a new Dockerfile and build the image: -``` -FROM myrepo/spark:2.4.0 -ADD core-site.xml /opt/hadoop/conf/core-site.xml -ADD ozone-site.xml /opt/hadoop/conf/ozone-site.xml -ENV HADOOP_CONF_DIR=/opt/hadoop/conf -ENV SPARK_EXTRA_CLASSPATH=/opt/hadoop/conf -ADD hadoop-ozone-filesystem-lib-legacy-0.4.0-SNAPSHOT.jar /opt/hadoop-ozone-filesystem-lib-legacy.jar -``` - -```bash -docker build -t myrepo/spark-ozone -``` - -For remote kubernetes cluster you may need to push it: - -```bash -docker push myrepo/spark-ozone -``` - -## Create a bucket and identify the ozonefs path - -Download any text file and put it to the `/tmp/alice.txt` first. - -```bash -kubectl port-forward s3g-0 9878:9878 -aws s3api --endpoint http://localhost:9878 create-bucket --bucket=test -aws s3api --endpoint http://localhost:9878 put-object --bucket test --key alice.txt --body /tmp/alice.txt -kubectl exec -it scm-0 ozone s3 path test -``` - -The output of the last command is something like this: - -``` -Volume name for S3Bucket is : s3asdlkjqiskjdsks -Ozone FileSystem Uri is : o3fs://test.s3asdlkjqiskjdsks -``` - -Write down the ozone filesystem uri as it should be used with the spark-submit command. - -## Create service account to use - -```bash -kubectl create serviceaccount spark -n yournamespace -kubectl create clusterrolebinding spark-role --clusterrole=edit --serviceaccount=yournamespace:spark --namespace=yournamespace -``` -## Execute the job - -Execute the following spark-submit command, but change at least the following values: - - * the kubernetes master url (you can check your _~/.kube/config_ to find the actual value) - * the kubernetes namespace (_yournamespace_ in this example) - * serviceAccountName (you can use the _spark_ value if you followed the previous steps) - * container.image (in this example this is _myrepo/spark-ozone_. This is pushed to the registry in the previous steps) - * location of the input file (o3fs://...), use the string which is identified earlier with the \ - `ozone s3 path ` command - -```bash -bin/spark-submit \ - --master k8s://https://kubernetes:6443 \ - --deploy-mode cluster \ - --name spark-word-count \ - --class org.apache.spark.examples.JavaWordCount \ - --conf spark.executor.instances=1 \ - --conf spark.kubernetes.namespace=yournamespace \ - --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \ - --conf spark.kubernetes.container.image=myrepo/spark-ozone \ - --conf spark.kubernetes.container.image.pullPolicy=Always \ - --jars /opt/hadoop-ozone-filesystem-lib-legacy.jar \ - local:///opt/spark/examples/jars/spark-examples_2.11-2.4.0.jar \ - o3fs://bucket.volume/alice.txt -``` - -Check the available `spark-word-count-...` pods with `kubectl get pod` - -Check the output of the calculation with \ -`kubectl logs spark-word-count-1549973913699-driver` - -You should see the output of the wordcount job. For example: - -``` -... -name: 8 -William: 3 -this,': 1 -SOUP!': 1 -`Silence: 1 -`Mine: 1 -ordered.: 1 -considering: 3 -muttering: 3 -candle: 2 -... 
-``` diff --git a/hadoop-hdds/docs/content/recipe/_index.md b/hadoop-hdds/docs/content/recipe/_index.md deleted file mode 100644 index 47053ab6fbba8..0000000000000 --- a/hadoop-hdds/docs/content/recipe/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Recipes -date: "2017-10-10" -menu: main -weight: 9 - ---- - - - -{{}} - Standard how-to documents which describe how to use Ozone with other Software. - For example, how to use Ozone with Apache Spark. -{{}} diff --git a/hadoop-hdds/docs/content/recipe/prometheus-key-allocate.png b/hadoop-hdds/docs/content/recipe/prometheus-key-allocate.png deleted file mode 100644 index c934fc09d3c2a..0000000000000 Binary files a/hadoop-hdds/docs/content/recipe/prometheus-key-allocate.png and /dev/null differ diff --git a/hadoop-hdds/docs/content/recipe/prometheus.png b/hadoop-hdds/docs/content/recipe/prometheus.png deleted file mode 100644 index 12bbe55f58997..0000000000000 Binary files a/hadoop-hdds/docs/content/recipe/prometheus.png and /dev/null differ diff --git a/hadoop-hdds/docs/content/security/SecuityWithRanger.md b/hadoop-hdds/docs/content/security/SecuityWithRanger.md deleted file mode 100644 index cbbd53ec7c128..0000000000000 --- a/hadoop-hdds/docs/content/security/SecuityWithRanger.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "Apache Ranger" -date: "2019-April-03" -weight: 5 -summary: Apache Ranger is a framework to enable, monitor and manage comprehensive data security across the Hadoop platform. -icon: user ---- - - - -Apache Ranger™ is a framework to enable, monitor and manage comprehensive data -security across the Hadoop platform. Any version of Apache Ranger which is greater -than 1.20 is aware of Ozone, and can manage an Ozone cluster. - - -To use Apache Ranger, you must have Apache Ranger installed in your Hadoop -Cluster. For installation instructions of Apache Ranger, Please take a look -at the [Apache Ranger website](https://ranger.apache.org/index.html). - -If you have a working Apache Ranger installation that is aware of Ozone, then -configuring Ozone to work with Apache Ranger is trivial. You have to enable -the ACLs support and set the acl authorizer class inside Ozone to be Ranger -authorizer. Please add the following properties to the ozone-site.xml. - -Property|Value ---------|------------------------------------------------------------ -ozone.acl.enabled | true -ozone.acl.authorizer.class| org.apache.ranger.authorization.ozone.authorizer.RangerOzoneAuthorizer diff --git a/hadoop-hdds/docs/content/security/SecureOzone.md b/hadoop-hdds/docs/content/security/SecureOzone.md deleted file mode 100644 index d4d836fcf7fe9..0000000000000 --- a/hadoop-hdds/docs/content/security/SecureOzone.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: "Securing Ozone" -date: "2019-April-03" -summary: Overview of Ozone security concepts and steps to secure Ozone Manager and SCM. -weight: 1 -icon: tower ---- - - - -# Kerberos - -Ozone depends on [Kerberos](https://web.mit.edu/kerberos/) to make the -clusters secure. Historically, HDFS has supported running in an isolated -secure networks where it is possible to deploy without securing the cluster. - -This release of Ozone follows that model, but soon will move to _secure by -default._ Today to enable security in ozone cluster, we need to set the -configuration **ozone.security.enabled** to _true_ and **hadoop.security.authentication** -to _kerberos_. 
- -Property|Value -----------------------|--------- -ozone.security.enabled| _true_ -hadoop.security.authentication| _kerberos_ - -# Tokens # - -Ozone uses a notion of tokens to avoid overburdening the Kerberos server. -When you serve thousands of requests per second, involving Kerberos might not -work well. Hence once an authentication is done, Ozone issues delegation -tokens and block tokens to the clients. These tokens allow applications to do -specified operations against the cluster, as if they have kerberos tickets -with them. Ozone supports following kinds of tokens. - -### Delegation Token ### -Delegation tokens allow an application to impersonate a users kerberos -credentials. This token is based on verification of kerberos identity and is -issued by the Ozone Manager. Delegation tokens are enabled by default when -security is enabled. - -### Block Token ### - -Block tokens allow a client to read or write a block. This is needed so that -data nodes know that the user/client has permission to read or make -modifications to the block. - -### S3Token ### - -S3 uses a very different shared secret security scheme. Ozone supports the AWS Signature Version 4 protocol, -and from the end users perspective Ozone's s3 feels exactly like AWS S3. - -The S3 credential tokens are called S3 tokens in the code. These tokens are -also enabled by default when security is enabled. - - -Each of the service daemons that make up Ozone needs a Kerberos service -principal name and a corresponding [kerberos key tab](https://web.mit.edu/kerberos/krb5-latest/doc/basic/keytab_def.html) file. - -All these settings should be made in ozone-site.xml. - -
### Storage Container Manager

SCM requires two Kerberos principals, and the corresponding keytab files
for both of these principals.

Property|Description
--------|------------
hdds.scm.kerberos.principal | The SCM service principal. e.g. scm/_HOST@REALM.COM
hdds.scm.kerberos.keytab.file | The keytab file used by SCM daemon to login as its service principal.
hdds.scm.http.kerberos.principal | SCM http server service principal.
hdds.scm.http.kerberos.keytab | The keytab file used by SCM http server to login as its service principal.
### Ozone Manager

Like SCM, OM also requires two Kerberos principals, and the corresponding
keytab files for both of these principals.

Property|Description
--------|------------
ozone.om.kerberos.principal | The OzoneManager service principal. e.g. om/_HOST@REALM.COM
ozone.om.kerberos.keytab.file | The keytab file used by the OM daemon to login as its service principal.
ozone.om.http.kerberos.principal | Ozone Manager http server service principal.
ozone.om.http.kerberos.keytab | The keytab file used by OM http server to login as its service principal.
### S3 Gateway

The S3 gateway requires one service principal. The configuration values
needed in ozone-site.xml are:

Property|Description
--------|------------
ozone.s3g.authentication.kerberos.principal | S3 Gateway principal. e.g. HTTP/_HOST@EXAMPLE.COM
ozone.s3g.keytab.file | The keytab file used by the S3 gateway.
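Before starting the daemons it can be useful to confirm that each keytab actually works for its principal. This sanity check is not part of the original setup steps, and the keytab path and principal below are placeholders for your own values:

```bash
# Obtain a ticket from the keytab to prove the principal/keytab pair is valid.
# Replace the path and principal with the values configured above.
kinit -kt /etc/security/keytabs/scm.keytab scm/scm-host.example.com@EXAMPLE.COM

# List the ticket that was just acquired.
klist
```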
diff --git a/hadoop-hdds/docs/content/security/SecuringDatanodes.md b/hadoop-hdds/docs/content/security/SecuringDatanodes.md deleted file mode 100644 index 6b7d82365cbfe..0000000000000 --- a/hadoop-hdds/docs/content/security/SecuringDatanodes.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "Securing Datanodes" -date: "2019-April-03" -weight: 2 -summary: Explains different modes of securing data nodes. These range from kerberos to auto approval. -icon: th ---- - - - -Datanodes under Hadoop is traditionally secured by creating a Keytab file on -the data nodes. With Ozone, we have moved away to using data node -certificates. That is, Kerberos on data nodes is not needed in case of a -secure Ozone cluster. - -However, we support the legacy Kerberos based Authentication to make it easy -for the current set of users.The HDFS configuration keys are the following -that is setup in hdfs-site.xml. - -Property|Description ---------|-------------- -dfs.datanode.kerberos.principal|The datanode service principal.
e.g. dn/_HOST@REALM.COM -dfs.datanode.keytab.file| The keytab file used by datanode daemon to login as its service principal. -hdds.datanode.http.kerberos.principal| Datanode http server service principal. -hdds.datanode.http.kerberos.keytab| The keytab file used by datanode http server to login as its service principal. - - -## How a data node becomes secure. - -Under Ozone, when a data node boots up and discovers SCM's address, the first -thing that data node does is to create a private key and send a certificate -request to the SCM. - -

### Certificate Approval via Kerberos (Current Model)

SCM has a built-in CA and has to approve this request. If the data node already has a Kerberos keytab, then SCM will trust the Kerberos credentials and issue a certificate automatically.

### Manual Approval (In Progress)

If these are brand new data nodes and Kerberos keytabs are not present on the data nodes, then the request for the data node's identity certificate is queued up for approval by the administrator (this is work in progress, not committed in Ozone yet). In other words, the web of trust is established by the administrator of the cluster.

### Automatic Approval (In Progress)

-If you running under an container orchestrator like Kubernetes, we rely on -Kubernetes to create a one-time token that will be given to data node during -boot time to prove the identity of the data node container (This is also work -in progress.) - - -Once a certificate is issued, a data node is secure and Ozone manager can -issue block tokens. If there is no data node certificates or the SCM's root -certificate is not present in the data node, then data node will register -itself and down load the SCM's root certificate as well get the certificates -for itself. diff --git a/hadoop-hdds/docs/content/security/SecuringS3.md b/hadoop-hdds/docs/content/security/SecuringS3.md deleted file mode 100644 index 1cb0c809e6116..0000000000000 --- a/hadoop-hdds/docs/content/security/SecuringS3.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: "Securing S3" -date: "2019-April-03" -summary: Ozone supports S3 protocol, and uses AWS Signature Version 4 protocol which allows a seamless S3 experience. -weight: 4 -icon: cloud ---- - - -To access an S3 bucket, users need AWS access key ID and AWS secret. Both of -these are generated by going to AWS website. When you use Ozone's S3 -protocol, you need the same AWS access key and secret. - -Under Ozone, the clients can download the access key directly from Ozone. -The user needs to `kinit` first and once they have authenticated via kerberos - they can download the S3 access key ID and AWS secret. Just like AWS S3, - both of these are secrets that needs to be protected by the client since it - gives full access to the S3 buckets. - - -* S3 clients can get the secret access id and user secret from OzoneManager. - -```bash -ozone s3 getsecret -``` -This command will talk to ozone, validate the user via kerberos and generate -the AWS credentials. The values will be printed out on the screen. You can -set these values up in your _.aws_ file for automatic access while working -against Ozone S3 buckets. - - - - -* Now you can proceed to setup these secrets in aws configs: - -```bash -aws configure set default.s3.signature_version s3v4 -aws configure set aws_access_key_id ${accessId} -aws configure set aws_secret_access_key ${secret} -aws configure set region us-west-1 -``` -Please refer to AWS S3 documentation on how to use S3 via command line or via -S3 API. diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md deleted file mode 100644 index 3e8f2d16819fe..0000000000000 --- a/hadoop-hdds/docs/content/security/SecuringTDE.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "Transparent Data Encryption" -date: "2019-April-03" -summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access. You can enable this per key or per bucket. -weight: 3 -icon: lock ---- - - -Ozone TDE setup process and usage are very similar to HDFS TDE. -The major difference is that Ozone TDE is enabled at Ozone bucket level -when a bucket is created. - -### Setting up the Key Management Server - -To use TDE, clients must setup a Key Management Server and provide that URI to -Ozone/HDFS. Since Ozone and HDFS can use the same Key Management Server, this - configuration can be provided via *hdfs-site.xml*. - -Property| Value ------------------------------------|----------------------------------------- -hadoop.security.key.provider.path | KMS uri.
e.g. kms://http@kms-host:9600/kms - -### Using Transparent Data Encryption -If this is already configured for your cluster, then you can simply proceed -to create the encryption key and enable encrypted buckets. - -To create an encrypted bucket, client need to: - - * Create a bucket encryption key with hadoop key CLI, which is similar to - how you would use HDFS encryption zones. - - ```bash - hadoop key create encKey - ``` - The above command creates an encryption key for the bucket you want to protect. - Once the key is created, you can tell Ozone to use that key when you are - reading and writing data into a bucket. - - * Assign the encryption key to a bucket. - - ```bash - ozone sh bucket create -k encKey /vol/encryptedBucket - ``` - -After this command, all data written to the _encryptedBucket_ will be encrypted -via the encKey and while reading the clients will talk to Key Management -Server and read the key and decrypt it. In other words, the data stored -inside Ozone is always encrypted. The fact that data is encrypted at rest -will be completely transparent to the clients and end users. diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md b/hadoop-hdds/docs/content/security/SecurityAcls.md deleted file mode 100644 index 31bbb0a95cc2a..0000000000000 --- a/hadoop-hdds/docs/content/security/SecurityAcls.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "Ozone ACLs" -date: "2019-April-03" -weight: 6 -summary: Native Ozone Authorizer provides Access Control List (ACL) support for Ozone without Ranger integration. -icon: transfer ---- - - -Ozone supports a set of native ACLs. These ACLs can be used independently or -along with Ranger. If Apache Ranger is enabled, then ACL will be checked -first with Ranger and then Ozone's internal ACLs will be evaluated. - -Ozone ACLs are a super set of Posix and S3 ACLs. - -The general format of an ACL is _object_:_who_:_rights_. - -Where an _object_ can be: - -1. **Volume** - An Ozone volume. e.g. _/volume_ -2. **Bucket** - An Ozone bucket. e.g. _/volume/bucket_ -3. **Key** - An object key or an object. e.g. _/volume/bucket/key_ -4. **Prefix** - A path prefix for a specific key. e.g. _/volume/bucket/prefix1/prefix2_ - -Where a _who_ can be: - -1. **User** - A user in the Kerberos domain. User like in Posix world can be -named or unnamed. -2. **Group** - A group in the Kerberos domain. Group also like in Posix world -can -be named or unnamed. -3. **World** - All authenticated users in the Kerberos domain. This maps to -others in the Posix domain. -4. **Anonymous** - Ignore the user field completely. This is an extension to -the Posix semantics, This is needed for S3 protocol, where we express that -we have no way of knowing who the user is or we don't care. - - - - -Where a _right_ can be: - -1. **Create** – This ACL provides a user the ability to create buckets in a -volume and keys in a bucket. Please note: Under Ozone, Only admins can create volumes. -2. **List** – This ACL allows listing of buckets and keys. This ACL is attached - to the volume and buckets which allow listing of the child objects. Please note: The user and admins can list the volumes owned by the user. -3. **Delete** – Allows the user to delete a volume, bucket or key. -4. **Read** – Allows the user to read the metadata of a Volume and Bucket and -data stream and metadata of a key. -5. **Write** - Allows the user to write the metadata of a Volume and Bucket and -allows the user to overwrite an existing ozone key. -6. 
**Read_ACL** – Allows a user to read the ACL on a specific object. -7. **Write_ACL** – Allows a user to write the ACL on a specific object. - -

### Ozone Native ACL APIs

- -The ACLs can be manipulated by a set of APIs supported by Ozone. The APIs -supported are: - -1. **SetAcl** – This API will take user principal, the name, type -of the ozone object and a list of ACLs. -2. **GetAcl** – This API will take the name and type of the ozone object -and will return a list of ACLs. -3. **AddAcl** - This API will take the name, type of the ozone object, the -ACL, and add it to existing ACL entries of the ozone object. -4. **RemoveAcl** - This API will take the name, type of the -ozone object and the ACL that has to be removed. diff --git a/hadoop-hdds/docs/content/security/_index.md b/hadoop-hdds/docs/content/security/_index.md deleted file mode 100644 index 20967e3343be0..0000000000000 --- a/hadoop-hdds/docs/content/security/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Security -name: Security -identifier: SecureOzone -menu: main -weight: 5 ---- - - -{{}} - Ozone is an enterprise class, secure storage system. There are many - optional security features in Ozone. Following pages discuss how - you can leverage the security features of Ozone. -{{}} - - - -Depending on your needs, there are multiple optional steps in securing ozone. diff --git a/hadoop-hdds/docs/content/shell/BucketCommands.md b/hadoop-hdds/docs/content/shell/BucketCommands.md deleted file mode 100644 index e81734924fb58..0000000000000 --- a/hadoop-hdds/docs/content/shell/BucketCommands.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Bucket Commands -summary: Bucket commands help you to manage the life cycle of a volume. -weight: 3 ---- - - -Ozone shell supports the following bucket commands. - - * [create](#create) - * [delete](#delete) - * [info](#info) - * [list](#list) - -### Create - -The `bucket create` command allows users to create a bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -g, \-\-enforcegdpr | Optional, if set to true it creates a GDPR compliant bucket, if not specified or set to false, it creates an ordinary bucket. -| Uri | The name of the bucket in **/volume/bucket** format. - - -{{< highlight bash >}} -ozone sh bucket create /hive/jan -{{< /highlight >}} - -The above command will create a bucket called _jan_ in the _hive_ volume. -Since no scheme was specified this command defaults to O3 (RPC) protocol. - -### Delete - -The `bucket delete` command allows users to delete a bucket. If the -bucket is not empty then this command will fail. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket - -{{< highlight bash >}} -ozone sh bucket delete /hive/jan -{{< /highlight >}} - -The above command will delete _jan_ bucket if it is empty. - -### Info - -The `bucket info` commands returns the information about the bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket. - -{{< highlight bash >}} -ozone sh bucket info /hive/jan -{{< /highlight >}} - -The above command will print out the information about _jan_ bucket. - -### List - -The `bucket list` command allows users to list the buckets in a volume. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | Maximum number of results to return. Default: 100 -| -p, \-\-prefix | Optional, Only buckets that match this prefix will be returned. 
-| -s, \-\-start | The listing will start from key after the start key. -| Uri | The name of the _volume_. - -{{< highlight bash >}} -ozone sh bucket list /hive -{{< /highlight >}} - -This command will list all buckets on the volume _hive_. diff --git a/hadoop-hdds/docs/content/shell/Format.md b/hadoop-hdds/docs/content/shell/Format.md deleted file mode 100644 index 72174c9ae9a49..0000000000000 --- a/hadoop-hdds/docs/content/shell/Format.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Shell Overview -summary: Explains the command syntax used by shell command. -weight: 1 ---- - - -Ozone shell help can be invoked at _object_ level or at _action_ level. -For example: - -{{< highlight bash >}} -ozone sh volume --help -{{< /highlight >}} - -This will show all possible actions for volumes. - -or it can be invoked to explain a specific action like -{{< highlight bash >}} -ozone sh volume create --help -{{< /highlight >}} -This command will give you command line options of the create command. - -

- - -### General Command Format - -The Ozone shell commands take the following format. - -> _ozone sh object action url_ - -**ozone** script is used to invoke all Ozone sub-commands. The ozone shell is -invoked via ```sh``` command. - -The object can be a volume, bucket or a key. The action is various verbs like -create, list, delete etc. - - -Ozone URL can point to a volume, bucket or keys in the following format: - -_\[scheme\]\[server:port\]/volume/bucket/key_ - - -Where, - -1. **Scheme** - This should be `o3` which is the native RPC protocol to access - Ozone API. The usage of the schema is optional. - -2. **Server:Port** - This is the address of the Ozone Manager. If the port is -omitted the default port from ozone-site.xml will be used. - -Depending on the call, the volume/bucket/key names will be part of the URL. -Please see volume commands, bucket commands, and key commands section for more -detail. diff --git a/hadoop-hdds/docs/content/shell/KeyCommands.md b/hadoop-hdds/docs/content/shell/KeyCommands.md deleted file mode 100644 index b4a38c8b1b521..0000000000000 --- a/hadoop-hdds/docs/content/shell/KeyCommands.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: Key Commands -summary: Key commands help you to manage the life cycle of - Keys / Objects. -weight: 4 ---- - - - -Ozone shell supports the following key commands. - - * [get](#get) - * [put](#put) - * [delete](#delete) - * [info](#info) - * [list](#list) - * [rename](#rename) - - -### Get - -The `key get` command downloads a key from Ozone cluster to local file system. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. -| FileName | Local file to download the key to. - - -{{< highlight bash >}} -ozone sh key get /hive/jan/sales.orc sales.orc -{{< /highlight >}} -Downloads the file sales.orc from the _/hive/jan_ bucket and writes to the -local file sales.orc. - -### Put - -The `key put` command uploads a file from the local file system to the specified bucket. - -***Params:*** - - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. -| FileName | Local file to upload. -| -r, \-\-replication | Optional, Number of copies, ONE or THREE are the options. Picks up the default from cluster configuration. - -{{< highlight bash >}} -ozone sh key put /hive/jan/corrected-sales.orc sales.orc -{{< /highlight >}} -The above command will put the sales.orc as a new key into _/hive/jan/corrected-sales.orc_. - -### Delete - -The `key delete` command removes the key from the bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key. - -{{< highlight bash >}} -ozone sh key delete /hive/jan/corrected-sales.orc -{{< /highlight >}} - -The above command deletes the key _/hive/jan/corrected-sales.orc_. - - -### Info - -The `key info` commands returns the information about the key. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key. - -{{< highlight bash >}} -ozone sh key info /hive/jan/sales.orc -{{< /highlight >}} - -The above command will print out the information about _/hive/jan/sales.orc_ -key. - -### List - -The `key list` command allows user to list all keys in a bucket. 
- -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | Maximum number of results to return. Default: 1000 -| -p, \-\-prefix | Optional, Only buckets that match this prefix will be returned. -| -s, \-\-start | The listing will start from key after the start key. -| Uri | The name of the _volume_. - -{{< highlight bash >}} -ozone sh key list /hive/jan -{{< /highlight >}} - -This command will list all keys in the bucket _/hive/jan_. - -### Rename - -The `key rename` command changes the name of an existing key in the specified bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket in **/volume/bucket** format. -| FromKey | The existing key to be renamed -| ToKey | The new desired name of the key - -{{< highlight bash >}} -ozone sh key rename /hive/jan sales.orc new_name.orc -{{< /highlight >}} -The above command will rename _sales.orc_ to _new\_name.orc_ in the bucket _/hive/jan_. diff --git a/hadoop-hdds/docs/content/shell/VolumeCommands.md b/hadoop-hdds/docs/content/shell/VolumeCommands.md deleted file mode 100644 index 47fb9852b863e..0000000000000 --- a/hadoop-hdds/docs/content/shell/VolumeCommands.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Volume Commands -weight: 2 -summary: Volume commands help you to manage the life cycle of a volume. ---- - - -Volume commands generally need administrator privileges. The ozone shell supports the following volume commands. - - * [create](#create) - * [delete](#delete) - * [info](#info) - * [list](#list) - * [update](#update) - -### Create - -The `volume create` command allows an administrator to create a volume and -assign it to a user. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | Optional, This argument that specifies the maximum size this volume can use in the Ozone cluster. | -| -u, \-\-user | Required, The name of the user who owns this volume. This user can create, buckets and keys on this volume. | -| Uri | The name of the volume. | - -{{< highlight bash >}} -ozone sh volume create --quota=1TB --user=bilbo /hive -{{< /highlight >}} - -The above command will create a volume called _hive_ on the ozone cluster. This -volume has a quota of 1TB, and the owner is _bilbo_. - -### Delete - -The `volume delete` command allows an administrator to delete a volume. If the -volume is not empty then this command will fail. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the volume. - -{{< highlight bash >}} -ozone sh volume delete /hive -{{< /highlight >}} - -The above command will delete the volume hive, if the volume has no buckets -inside it. - -### Info - -The `volume info` commands returns the information about the volume including -quota and owner information. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the volume. - -{{< highlight bash >}} -ozone sh volume info /hive -{{< /highlight >}} - -The above command will print out the information about hive volume. - -### List - -The `volume list` command will list the volumes owned by a user. 
- -{{< highlight bash >}} -ozone sh volume list --user hadoop -{{< /highlight >}} - -The above command will print out all the volumes owned by the user hadoop. - -### Update - -The volume update command allows changing of owner and quota on a given volume. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | Optional, This argument that specifies the maximum size this volume can use in the Ozone cluster. | -| -u, \-\-user | Optional, The name of the user who owns this volume. This user can create, buckets and keys on this volume. | -| Uri | The name of the volume. | - -{{< highlight bash >}} -ozone sh volume update --quota=10TB /hive -{{< /highlight >}} - -The above command updates the volume quota to 10TB. diff --git a/hadoop-hdds/docs/content/shell/_index.md b/hadoop-hdds/docs/content/shell/_index.md deleted file mode 100644 index 3cb1a9f61672b..0000000000000 --- a/hadoop-hdds/docs/content/shell/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Command Line Interface -menu: - main: - weight: 3 ---- - - - -{{}} - Ozone shell is the primary interface to interact with Ozone. - It provides a command shell interface to work against Ozone. -{{}} diff --git a/hadoop-hdds/docs/content/start/FromSource.md b/hadoop-hdds/docs/content/start/FromSource.md deleted file mode 100644 index 1e920d97cfc1f..0000000000000 --- a/hadoop-hdds/docs/content/start/FromSource.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: From Source -weight: 30 ---- - - -{{< requirements >}} - * Java 1.8 - * Maven - * Protoc (2.5) -{{< /requirements >}} - - - -If you are a Hadoop ninja, and wise in the ways of Apache, you already know -that a real Apache release is a source release. - -If you want to build from sources, Please untar the source tarball and run -the ozone build command. This instruction assumes that you have all the -dependencies to build Hadoop on your build machine. If you need instructions -on how to build Hadoop, please look at the Apache Hadoop Website. - -```bash -mvn -f pom.ozone.xml clean package -DskipTests=true -``` - -This will build an ozone-\.tar.gz in your `hadoop-ozone/dist/target` directory. - -You can copy this tarball and use this instead of binary artifacts that are -provided along with the official release. - -## How to test the build - -You can run the acceptance tests in the hadoop-ozone directory to make sure -that your build is functional. To launch the acceptance tests, please follow - the instructions in the **README.md** in the `smoketest` directory. - -```bash -cd smoketest -./test.sh -``` - - You can also execute only a minimal subset of the tests: - -```bash -cd smoketest -./test.sh --env ozone basic -``` - -Acceptance tests will start a small ozone cluster and verify that ozone shell and ozone file - system is fully functional. diff --git a/hadoop-hdds/docs/content/start/Kubernetes.md b/hadoop-hdds/docs/content/start/Kubernetes.md deleted file mode 100644 index ad855341aa075..0000000000000 --- a/hadoop-hdds/docs/content/start/Kubernetes.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Ozone on Kubernetes -weight: 22 ---- - - - -{{< requirements >}} - * Working kubernetes cluster (LoadBalancer, PersistentVolume are not required) - * kubectl -{{< /requirements >}} - - -As the _apache/ozone_ docker images are available from the dockerhub the deployment process is very similar to Minikube deployment. 
The only big difference is that we have dedicated set of k8s files for hosted clusters (for example we can use one datanode per host) -Deploy to kubernetes - -`kubernetes/examples` folder of the ozone distribution contains kubernetes deployment resource files for multiple use cases. - -To deploy to a hosted cluster use the ozone subdirectory: - -``` -cd kubernetes/examples/ozone -kubectl apply -f . -``` - -And you can check the results with - -``` -kubectl get pod -Access the services -``` - -Now you can access any of the services. By default the services are not published but you can access them with port-foward rules. - -``` -kubectl port-forward s3g-0 9878:9878 -kubectl port-forward scm-0 9876:9876 -``` diff --git a/hadoop-hdds/docs/content/start/Minikube.md b/hadoop-hdds/docs/content/start/Minikube.md deleted file mode 100644 index ebb249d1337d2..0000000000000 --- a/hadoop-hdds/docs/content/start/Minikube.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Minikube & Ozone -weight: 21 ---- - - - -{{< requirements >}} - * Working minikube setup - * kubectl -{{< /requirements >}} - -`kubernetes/examples` folder of the ozone distribution contains kubernetes deployment resource files for multiple use cases. By default the kubernetes resource files are configured to use `apache/ozone` image from the dockerhub. - -To deploy it to minikube use the minikube configuration set: - -``` -cd kubernetes/examples/minikube -kubectl apply -f . -``` - -And you can check the results with - -``` -kubectl get pod -``` - -Note: the kubernetes/examples/minikube resource set is optimized for minikube usage: - - * You can have multiple datanodes even if you have only one host (in a real production cluster usually you need one datanode per physical host) - * The services are published with node port - -## Access the services - -Now you can access any of the services. For each web endpoint an additional NodeType service is defined in the minikube k8s resource set. NodeType services are available via a generated port of any of the host nodes: - -```bash -kubectl get svc -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -datanode ClusterIP None 27s -kubernetes ClusterIP 10.96.0.1 443/TCP 118m -om ClusterIP None 9874/TCP 27s -om-public NodePort 10.108.48.148 9874:32649/TCP 27s -s3g ClusterIP None 9878/TCP 27s -s3g-public NodePort 10.97.133.137 9878:31880/TCP 27s -scm ClusterIP None 9876/TCP 27s -scm-public NodePort 10.105.231.28 9876:32171/TCP 27s -``` - -Minikube contains a convenience command to access any of the NodePort services: - -``` -minikube service s3g-public -Opening kubernetes service default/s3g-public in default browser... -``` \ No newline at end of file diff --git a/hadoop-hdds/docs/content/start/OnPrem.md b/hadoop-hdds/docs/content/start/OnPrem.md deleted file mode 100644 index 3bf40a6a767fe..0000000000000 --- a/hadoop-hdds/docs/content/start/OnPrem.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: Ozone On Premise Installation -weight: 20 - ---- - - -If you are feeling adventurous, you can setup ozone in a real cluster. -Setting up a real cluster requires us to understand the components of Ozone. -Ozone is designed to work concurrently with HDFS. However, Ozone is also -capable of running independently. The components of ozone are the same in both approaches. - -## Ozone Components - -1. Ozone Manager - Is the server that is in charge of the namespace of Ozone. Ozone Manager is responsible for all volume, bucket and key operations. -2. Storage Container Manager - Acts as the block manager. 
Ozone Manager -requests blocks from SCM, to which clients can write data. -3. Datanodes - Ozone data node code runs inside the HDFS datanode or in the independent deployment case runs an ozone datanode daemon. - -## Setting up an Ozone only cluster - -* Please untar the ozone-\ to the directory where you are going -to run Ozone from. We need Ozone jars on all machines in the cluster. So you -need to do this on all machines in the cluster. - -* Ozone relies on a configuration file called ```ozone-site.xml```. To -generate a template that you can replace with proper values, please run the -following command. This will generate a template called ```ozone-site.xml``` at -the specified path (directory). - -{{< highlight bash >}} -ozone genconf -{{< /highlight >}} - -Let us look at the settings inside the generated file (ozone-site.xml) and -how they control ozone. Once the right values are defined, this file -needs to be copied to ```ozone directory/etc/hadoop```. - - -* **ozone.enabled** This is the most critical setting for ozone. -Ozone is a work in progress and users have to enable this service explicitly. -By default, Ozone is disabled. Setting this flag to `true` enables ozone in the -HDFS or Ozone cluster. - -Here is an example, - -{{< highlight xml >}} - - ozone.enabled - true - -{{< /highlight >}} - -* **ozone.metadata.dirs** Allows Administrators to specify where the - metadata must reside. Usually you pick your fastest disk (SSD if - you have them on your nodes). OzoneManager, SCM and datanode will write the - metadata to this path. This is a required setting, if this is missing Ozone - will fail to come up. - - Here is an example, - -{{< highlight xml >}} - - ozone.metadata.dirs - /data/disk1/meta - -{{< /highlight >}} - -* **ozone.scm.names** Storage container manager(SCM) is a distributed block - service which is used by ozone. This property allows data nodes to discover - SCM's address. Data nodes send heartbeat to SCM. - Until HA feature is complete, we configure ozone.scm.names to be a - single machine. - - Here is an example, - - {{< highlight xml >}} - - ozone.scm.names - scm.hadoop.apache.org - - {{< /highlight >}} - - * **ozone.scm.datanode.id.dir** Data nodes generate a Unique ID called Datanode - ID. This identity is written to the file datanode.id in a directory specified by this path. *Data nodes - will create this path if it doesn't exist already.* - -Here is an example, -{{< highlight xml >}} - - ozone.scm.datanode.id.dir - /data/disk1/meta/node - -{{< /highlight >}} - -* **ozone.om.address** OM server address. This is used by OzoneClient and -Ozone File System. - -Here is an example, -{{< highlight xml >}} - - ozone.om.address - ozonemanager.hadoop.apache.org - -{{< /highlight >}} - - -## Ozone Settings Summary - -| Setting | Value | Comment | -|--------------------------------|------------------------------|------------------------------------------------------------------| -| ozone.enabled | true | This enables SCM and containers in HDFS cluster. | -| ozone.metadata.dirs | file path | The metadata will be stored here. | -| ozone.scm.names | SCM server name | Hostname:port or IP:port address of SCM. | -| ozone.scm.block.client.address | SCM server name and port | Used by services like OM | -| ozone.scm.client.address | SCM server name and port | Used by client-side | -| ozone.scm.datanode.address | SCM server name and port | Used by datanode to talk to SCM | -| ozone.om.address | OM server name | Used by Ozone handler and Ozone file system. 
| - - -## Startup the cluster - -Before we boot up the Ozone cluster, we need to initialize both SCM and Ozone Manager. - -{{< highlight bash >}} -ozone scm --init -{{< /highlight >}} -This allows SCM to create the cluster Identity and initialize its state. -The ```init``` command is similar to Namenode format. Init command is executed only once, that allows SCM to create all the required on-disk structures to work correctly. -{{< highlight bash >}} -ozone --daemon start scm -{{< /highlight >}} - -Once we know SCM is up and running, we can create an Object Store for our use. This is done by running the following command. - -{{< highlight bash >}} -ozone om --init -{{< /highlight >}} - - -Once Ozone manager is initialized, we are ready to run the name service. - -{{< highlight bash >}} -ozone --daemon start om -{{< /highlight >}} - -At this point Ozone's name services, the Ozone manager, and the block service SCM is both running.\ -**Please note**: If SCM is not running -```om --init``` command will fail. SCM start will fail if on-disk data structures are missing. So please make sure you have done both ```scm --init``` and ```om --init``` commands. - -Now we need to start the data nodes. Please run the following command on each datanode. -{{< highlight bash >}} -ozone --daemon start datanode -{{< /highlight >}} - -At this point SCM, Ozone Manager and data nodes are up and running. - -***Congratulations!, You have set up a functional ozone cluster.*** - -## Shortcut - -If you want to make your life simpler, you can just run -{{< highlight bash >}} -ozone scm --init -ozone om --init -start-ozone.sh -{{< /highlight >}} - -This assumes that you have set up the slaves file correctly and ssh -configuration that allows ssh-ing to all data nodes. This is the same as the -HDFS configuration, so please refer to HDFS documentation on how to set this -up. diff --git a/hadoop-hdds/docs/content/start/RunningViaDocker.md b/hadoop-hdds/docs/content/start/RunningViaDocker.md deleted file mode 100644 index 9e1e361122538..0000000000000 --- a/hadoop-hdds/docs/content/start/RunningViaDocker.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Pseudo-cluster -weight: 23 - ---- - - -{{< requirements >}} - * docker and docker-compose -{{< /requirements >}} - -* Download the Ozone binary tarball and untar it. - -* Go to the directory where the docker compose files exist and tell -`docker-compose` to start Ozone in the background. This will start a small -ozone instance on your machine. - -{{< highlight bash >}} -cd compose/ozone/ - -docker-compose up -d -{{< /highlight >}} - -To verify that ozone is working as expected, let us log into a data node and -run _freon_, the load generator for Ozone. The ```exec datanode bash``` command -will open a bash shell on the datanode. - -The `ozone freon` command is executed within the datanode container. You can quit freon via CTRL-C any time. The -```rk``` profile instructs freon to generate random keys. - -{{< highlight bash >}} -docker-compose exec datanode bash -ozone freon rk -{{< /highlight >}} - -You can check out the **OzoneManager UI** at http://localhost:9874/ to see the -activity generated by freon. -While you are there, please don't forget to check out the ozone configuration explorer. 
- -***Congratulations, You have just run your first ozone cluster.*** - -To shutdown the cluster, please run -{{< highlight bash >}} -docker-compose down -{{< /highlight >}} - diff --git a/hadoop-hdds/docs/content/start/StartFromDockerHub.md b/hadoop-hdds/docs/content/start/StartFromDockerHub.md deleted file mode 100644 index e3e7d41cce68b..0000000000000 --- a/hadoop-hdds/docs/content/start/StartFromDockerHub.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Simple Single Ozone -weight: 10 - ---- - - -{{< requirements >}} - * Working docker setup - * AWS CLI (optional) -{{< /requirements >}} - -# Ozone in a Single Container - -The easiest way to start up an all-in-one ozone container is to use the latest -docker image from docker hub: - -```bash -docker run -p 9878:9878 -p 9876:9876 apache/ozone -``` -This command will pull down the ozone image from docker hub and start all -ozone services in a single container.
-This container will run the required metadata servers (Ozone Manager, Storage -Container Manager) one data node and the S3 compatible REST server -(S3 Gateway). - -# Local multi-container cluster - -If you would like to use a more realistic pseudo-cluster where each components -run in own containers, you can start it with a docker-compose file. - -We have shipped a docker-compose and an enviorment file as part of the -container image that is uploaded to docker hub. - -The following commands can be used to extract these files from the image in the docker hub. -```bash -docker run apache/ozone cat docker-compose.yaml > docker-compose.yaml -docker run apache/ozone cat docker-config > docker-config -``` - - Now you can start the cluster with docker-compose: - -```bash -docker-compose up -d -``` - -If you need multiple datanodes, we can just scale it up: - -```bash - docker-compose scale datanode=3 - ``` -# Running S3 Clients - -Once the cluster is booted up and ready, you can verify its status by -connecting to the SCM's UI at [http://localhost:9876](http://localhost:9876). - -The S3 gateway endpoint will be exposed at port 9878. You can use Ozone's S3 -support as if you are working against the real S3. - - -Here is how you create buckets from command line: - -```bash -aws s3api --endpoint http://localhost:9878/ create-bucket --bucket=bucket1 -``` - -Only notable difference in the above command line is the fact that you have -to tell the _endpoint_ address to the aws s3api command. - -Now let us put a simple file into the S3 Bucket hosted by Ozone. We will -start by creating a temporary file that we can upload to Ozone via S3 support. -```bash -ls -1 > /tmp/testfile - ``` - This command creates a temporary file that - we can upload to Ozone. The next command actually uploads to Ozone's S3 - bucket using the standard aws s3 command line interface. - -```bash -aws s3 --endpoint http://localhost:9878 cp --storage-class REDUCED_REDUNDANCY /tmp/testfile s3://bucket1/testfile -``` - -We can now verify that file got uploaded by running the list command against -our bucket. - -```bash -aws s3 --endpoint http://localhost:9878 ls s3://bucket1/testfile -``` - - -http://localhost:9878/bucket1?browser diff --git a/hadoop-hdds/docs/content/start/_index.md b/hadoop-hdds/docs/content/start/_index.md deleted file mode 100644 index 5529661b0a21f..0000000000000 --- a/hadoop-hdds/docs/content/start/_index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Getting Started -name: Getting Started -identifier: Starting -menu: main -weight: 1 -cards: "false" ---- - - - -{{}} -There are many ways to install and run Ozone. Starting from simple docker -deployments on -local nodes, to full scale multi-node cluster deployment on -Kubernetes or bare-metal. -{{}} - -
-
-Easy Start
-
-Running Ozone from Docker Hub
-
-You can try out Ozone using Docker Hub without downloading the official release. This makes it easy to explore Ozone.
-
- {{}}
- The simplest and easiest way to start an Ozone cluster
- and explore what it can do is to start Ozone via Docker.
- {{}}
-
-
-Recommended
-
-Running Ozone from an Official Release
-
- Apache Ozone can also be run from the official release packages. Along with the official source releases, we also release a set of convenience binary packages. It is easy to run these binaries in different configurations.
-
- {{}}
-Ozone is designed to work concurrently with HDFS. The physical cluster instructions explain each component of Ozone and how to deploy it with maximum control.
- {{}}
-
- {{}}
-Ozone is designed to work well under Kubernetes. These are instructions to deploy Ozone on K8s. Ozone provides a replicated storage solution for K8s-based apps.
- {{}}
-
- {{}}
-Ozone comes with a standard set of K8s resources. You can deploy them to MiniKube and experiment with the K8s-based deployments.
- {{}}
-
- {{}}
- We also ship standard Docker files with the official release. These are part of the official release and do not depend on Docker Hub.
- {{}}
-
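For the Kubernetes and MiniKube cards above, a bare-bones deployment sketch might look like the following; the manifest directory is an assumption about how the release ships its K8s examples, and the linked pages remain the authoritative instructions.

```bash
# From an unpacked Ozone release: apply the shipped example manifests and
# watch the pods come up. The directory layout is an assumption; check the
# release tarball for the actual path.
kubectl apply -f kubernetes/examples/minikube/
kubectl get pods -w
```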
-
-
-Hadoop Ninja
-
-Building From Sources
-
- Instructions to build Ozone from source to create deployment packages.
-
- {{}}
-If you are a Hadoop ninja, and wise in the ways of Apache, you already know that a real Apache release is a source release. We believe that even ninjas need help at times.
- {{}}
-
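As a rough illustration of the source-build path mentioned in the card above (a sketch only; the Building From Sources page is authoritative for the exact profiles and flags), a plain Maven build from an Ozone source checkout looks like this:

```bash
# Run from the root of the source checkout; skipping unit tests keeps the
# build short. Profiles for producing a full distribution tarball are
# described in the project's build documentation.
mvn clean package -DskipTests
```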
diff --git a/hadoop-hdds/docs/content/start/docker.png b/hadoop-hdds/docs/content/start/docker.png
deleted file mode 100644
index 048730b23d0c3..0000000000000
Binary files a/hadoop-hdds/docs/content/start/docker.png and /dev/null differ
diff --git a/hadoop-hdds/docs/content/start/hadoop.png b/hadoop-hdds/docs/content/start/hadoop.png
deleted file mode 100644
index 183867ca0e96e..0000000000000
Binary files a/hadoop-hdds/docs/content/start/hadoop.png and /dev/null differ
diff --git a/hadoop-hdds/docs/content/start/k8s.png b/hadoop-hdds/docs/content/start/k8s.png
deleted file mode 100644
index 5fa2e9a90a547..0000000000000
Binary files a/hadoop-hdds/docs/content/start/k8s.png and /dev/null differ
diff --git a/hadoop-hdds/docs/content/start/minikube.png b/hadoop-hdds/docs/content/start/minikube.png
deleted file mode 100644
index 0609eccc74fef..0000000000000
Binary files a/hadoop-hdds/docs/content/start/minikube.png and /dev/null differ
diff --git a/hadoop-hdds/docs/content/tools/AuditParser.md b/hadoop-hdds/docs/content/tools/AuditParser.md
deleted file mode 100644
index e4da208ed8515..0000000000000
--- a/hadoop-hdds/docs/content/tools/AuditParser.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-title: "Audit Parser"
-date: 2018-12-17
-summary: Audit Parser tool can be used for querying the Ozone audit logs.
----
-
-The Audit Parser tool can be used for querying the Ozone audit logs.
-It creates a SQLite database at the specified path. If the database
-already exists, it will not create a new one.
-
-The database contains only one table called `audit`, defined as:
-
-CREATE TABLE IF NOT EXISTS audit (
-datetime text,
-level varchar(7),
-logger varchar(7),
-user text,
-ip text,
-op text,
-params text,
-result varchar(7),
-exception text,
-UNIQUE(datetime,level,logger,user,ip,op,params,result))
-
-Usage:
-{{< highlight bash >}}
-ozone auditparser [COMMAND] [PARAM]
-{{< /highlight >}}
-
-To load an audit log into the database:
-{{< highlight bash >}}
-ozone auditparser load
-{{< /highlight >}}
-The load command creates the audit table described above.
-
-To run a custom read-only query:
-{{< highlight bash >}}
-ozone auditparser query
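Because the custom-query example above is cut off in this diff, here is a hedged sketch of an equivalent read-only query issued directly against the SQLite database that the load command produces; the database path is an assumption, so substitute whatever path you passed to auditparser.

```bash
# Summarize the most frequent (user, op, result) combinations recorded in the
# audit table whose schema is shown above. Requires the sqlite3 CLI.
sqlite3 /tmp/ozone-audit.db \
  "SELECT user, op, result, COUNT(*) AS occurrences
     FROM audit
    GROUP BY user, op, result
    ORDER BY occurrences DESC
    LIMIT 10;"
```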